diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1a7bd1b2..c2d9061f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,8 +1,14 @@ name: build -on: ["push", "pull_request"] +on: + pull_request: + branches: + - main + push: + branches: + - main jobs: build: - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Git checkout uses: actions/checkout@v2 diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index c2c9d12e..082a3885 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -11,7 +11,7 @@ on: jobs: deploy: if: (github.event_name == 'push') || github.event.pull_request.merged == true - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Git checkout uses: actions/checkout@v2 diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml new file mode 100644 index 00000000..14dd75c3 --- /dev/null +++ b/.github/workflows/install.yml @@ -0,0 +1,65 @@ +# todo: Should check the installation is correct or not! Do it in the kusion repo. +name: installation check +on: + push: + branches: + - main + paths: + - 'static/scripts/**' + pull_request: + branches: + - 'main' + paths: + - 'static/scripts/**' +jobs: + check-scripts-on-macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v3 + - name: check install script + run: static/scripts/install.sh + - name: check uninstall script + run: static/scripts/uninstall.sh + check-brew-on-macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v3 + - name: brew install kusion + run: brew install KusionStack/tap/kusion + check-scripts-on-linux: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: check install script + run: static/scripts/install.sh + - name: check uninstall script + run: static/scripts/uninstall.sh + check-brew-on-linux: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: Homebrew/actions/setup-homebrew@master + - name: brew install kusion + run: brew install KusionStack/tap/kusion + shell: bash -ieo pipefail {0} +# check-powershell-on-windows: +# runs-on: windows-latest +# steps: +# - uses: actions/checkout@v3 +# - name: check install script +# run: static/scripts/install.ps1 +# shell: powershell +# - name: check kusion run +# run: C:\kusion\kusion.exe compile -w static/stack -o stdout +# shell: powershell +# check-scoop-on-windows: +# runs-on: windows-latest +# steps: +# - uses: actions/checkout@v3 +# - name: scoop install kusion +# run: | +# iex "& {$(irm get.scoop.sh)} -RunAsAdmin" +# scoop bucket add KusionStack https://github.com/KusionStack/scoop-bucket.git +# scoop install KusionStack/kusion +# # kusion compile -w static/stack -o stdout +# shell: powershell diff --git a/.gitignore b/.gitignore index a9cf4483..81b5d27a 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,5 @@ yarn-debug.log* yarn-error.log* .vscode/ -.idea/ \ No newline at end of file +.idea/ +.obsidian/ diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..827e8684 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# KusionStack Community Code of Conduct + +KusionStack follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). \ No newline at end of file diff --git a/README.md b/README.md index e89666f2..3f685137 100644 --- a/README.md +++ b/README.md @@ -9,24 +9,75 @@ Source for kusionstack.io site . Powered by [Docusaurus 2](https://docusaurus.io/). 
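Regarding the new installation-check workflow above and its open TODO about verifying the result: one hedged sketch of an extra step those jobs could run after the install/uninstall scripts (the `kusion version` subcommand is an assumption about the CLI, not something this repo defines):

```bash
# After static/scripts/install.sh: the binary should be on PATH and respond.
command -v kusion          # prints the install location if found
kusion version             # assumed subcommand; adjust to whatever the CLI actually supports

# After static/scripts/uninstall.sh: the binary should be gone again.
! command -v kusion
```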
-## Build local +## Write documentation for each sub product -``` +Write documentation for sub products in the `./docs/[subProductName]` directory. + +## Build locally + +```shell +$ rm -rf .docusaurus $ npm install $ npm run start ``` -Open http://localhost:3000 in browser. +Open http://localhost:3000 in the browser. -## Build local with i18n +## Build locally with i18n -``` -$ npm build +```shell +$ npm run build $ npx http-server ./build ``` -Open http://localhost:8080 in browser. +Open http://localhost:8080 in the browser. + +## Tagging a new version for each sub product + +1. First, make sure the current docs version (the `./docs/[subProductName]` directory) is ready to be frozen. +2. Enter the specified sub product name and a new version number: + +```bash +npm run docusaurus docs:version:[subProductName] [vMAJOR.MINOR] + +# If the sub product has an international document and needs to execute the command again, +# it will generate the i18n directory corresponding to that version. +npm run write-translations -- --locale [localeName] --override +``` + +Example: + +```bash +npm run docusaurus docs:version:docs v0.12 +npm run docusaurus docs:version:kuperator v0.2 +npm run docusaurus docs:version:ctrlmesh v0.3 +npm run docusaurus docs:version:karpor v0.4 + +# If the sub product has an international document and needs to execute the command again, +# it will generate the i18n directory corresponding to that version. +npm run write-translations -- --locale zh --override +``` + +Optional sub product names: + +- `docs` (alias for kusion) +- `kuperator` +- `ctrlmesh` +- `karpor` + +Format of version number: `[vMAJOR.MINOR]`, e.g. `v0.3`, `v0.13`. + +When tagging a new version of sub product, the document versioning mechanism will: + +- Copy the full `docs/[subProductName]` folder contents into a new `[subProductName]_versioned_docs/version-[versionName]/` folder. +- Create a versioned sidebars file based from your current [sidebar](docs-introduction.mdx#sidebar) configuration (if it exists) - saved as `[subProductName]_versioned_sidebars/version-[versionName]-sidebars.json`. +- Append the new version number to `[subProductName]_versions.json`. + +More see: + +- [Versioning](https://docusaurus.io/docs/versioning) +- [Docs Multi-instance](https://docusaurus.io/zh-CN/docs/2.x/docs-multi-instance#tagging-new-versions) ## Notice -This website is built under Docusaurus version 2.0.0-beta.17. There may be unknown errors when compiling on other versions. +This website is built under Docusaurus version 2.4.1. There may be unknown errors when compiling on other versions. diff --git a/blog/2022-09-15-declarative-config-overview/_index.md b/blog/2022-09-15-declarative-config-overview/_index.md index dd9e2b9e..19a569b2 100644 --- a/blog/2022-09-15-declarative-config-overview/_index.md +++ b/blog/2022-09-15-declarative-config-overview/_index.md @@ -170,11 +170,11 @@ Fig. 5: KCL core features. - **Easy-to-use**: Originated from high-level languages ​​such as Python and Golang, incorporating functional language features with low side effects. - **Well-designed**: Independent Spec-driven syntax, semantics, runtime and system modules design. -- **Quick modeling**: [Schema](https://kusionstack.io/docs/reference/lang/lang/tour#schema)-centric configuration types and modular abstraction. 
-- **Rich capabilities**: Configuration with type, logic and policy based on [Config](https://kusionstack.io/docs/reference/lang/lang/codelab/simple), [Schema](https://kusionstack.io/docs/reference/lang/lang/tour/#schema), [Lambda](https://kusionstack.io/docs/reference/lang/lang/tour/#function), [Rule](https://kusionstack.io/docs/reference/lang/lang/tour/#rule). -- **Stability**: Configuration stability built on [static type system](https://kusionstack.io/docs/reference/lang/lang/tour/#type-system), [constraints](https://kusionstack.io/docs/reference/lang/lang/tour/#validation), and [rules](https://kusionstack.io/docs/reference/lang/lang/tour#rule). -- **Scalability**: High scalability through [automatic merge mechanism](https://kusionstack.io/docs/reference/lang/lang/tour/#-operators-1) of isolated config blocks. -- **Fast automation**: Gradient automation scheme of [CRUD APIs](https://kusionstack.io/docs/reference/lang/lang/tour/#kcl-cli-variable-override), [multilingual SDKs](https://kusionstack.io/docs/reference/lang/xlang-api/overview), [language plugin](https://github.com/KusionStack/kcl-plugin) +- **Quick modeling**: [Schema](https://kcl-lang.io/docs/reference/lang/tour/#schema)-centric configuration types and modular abstraction. +- **Rich capabilities**: Configuration with type, logic and policy based on [Config](https://kusionstack.io/docs/reference/lang/lang/codelab/simple), [Schema](https://kcl-lang.io/docs/reference/lang/tour/#schema), [Lambda](https://kcl-lang.io/docs/reference/lang/tour/#function), [Rule](https://kcl-lang.io/docs/reference/lang/tour/#rule). +- **Stability**: Configuration stability built on [static type system](https://kcl-lang.io/docs/reference/lang/tour/#type-system), [constraints](https://kcl-lang.io/docs/reference/lang/tour/#validation), and [rules](https://kcl-lang.io/docs/reference/lang/tour/#rule). +- **Scalability**: High scalability through [automatic merge mechanism](https://kcl-lang.io/docs/reference/lang/tour/#operators) of isolated config blocks. +- **Fast automation**: Gradient automation scheme of [CRUD APIs](https://kcl-lang.io/docs/reference/lang/tour/#kcl-cli-variable-override), [multilingual SDKs](https://kusionstack.io/docs/reference/lang/xlang-api/overview), [language plugin](https://github.com/KusionStack/kcl-plugin) - **High performance**: High compile time and runtime performance using Rust & C and [LLVM](https://llvm.org/), and support compilation to native code and [WASM](https://webassembly.org/). - **API affinity**: Native support API ecological specifications such as [OpenAPI](https://github.com/KusionStack/kcl-openapi), Kubernetes CRD, Kubernetes YAML spec. - **Development friendly**: Friendly development experiences with rich [language tools](https://kusionstack.io/docs/reference/cli/kcl/) (Format, Lint, Test, Vet, Doc, etc.) and [IDE plugins](https://github.com/KusionStack/vscode-kcl). @@ -185,7 +185,7 @@ Fig. 5: KCL core features. Fig. 6: KCL core design. -For more language design and capabilities, see [KCL Documents](https://kusionstack.io/docs/reference/lang/lang/tour). Although KCL is not a general language, it has corresponding application scenarios. As shown in Fig. 
6, developers can write **config**, **schema**, **function** and **rule** through KCL, where config is used to define data, schema is used to describe the model definition of data, rule is used to validate data, and schema and rule can also be combined to use models and constraints that fully describe data, In addition, we can also use the lambda pure function in KCL to organize data code, encapsulate common code, and call it directly when needed. +For more language design and capabilities, see [KCL Documents](https://kcl-lang.io/docs/reference/lang/tour/). Although KCL is not a general language, it has corresponding application scenarios. As shown in Fig. 6, developers can write **config**, **schema**, **function** and **rule** through KCL, where config is used to define data, schema is used to describe the model definition of data, rule is used to validate data, and schema and rule can also be combined to use models and constraints that fully describe data, In addition, we can also use the lambda pure function in KCL to organize data code, encapsulate common code, and call it directly when needed. For use cases, KCL can perform structured K-V data validation complex configuration model definition and abstraction, strong constraint verification to avoid configuration errors, automation integration and engineering expansion. These features and use cases are described below. @@ -759,7 +759,7 @@ The blog gives the landscape overview of declarative configuration technology, f - Kustomize: [https://kustomize.io/](https://kustomize.io/) - Kube-linter: [https://github.com/stackrox/kube-linter](https://github.com/stackrox/kube-linter) - Checkov: [https://github.com/bridgecrewio/checkov](https://github.com/bridgecrewio/checkov) -- KCL Documents: [https://kusionstack.io/docs/reference/lang/lang/tour](https://kusionstack.io/docs/reference/lang/lang/tour) +- KCL Documents: [https://kcl-lang.io/docs/reference/lang/tour/](https://kcl-lang.io/docs/reference/lang/tour/) - How Terraform Works: A Visual Intro: [https://betterprogramming.pub/how-terraform-works-a-visual-intro-6328cddbe067](https://betterprogramming.pub/how-terraform-works-a-visual-intro-6328cddbe067) - How Terraform Works: Modules Illustrated: [https://awstip.com/terraform-modules-illustrate-26cbc48be83a](https://awstip.com/terraform-modules-illustrate-26cbc48be83a) - Helm: [https://helm.sh/](https://helm.sh/) diff --git a/blog/2022-09-16-learn-from-scale-practice/index.md b/blog/2022-09-16-learn-from-scale-practice/index.md index 39a20097..df7a6fe1 100644 --- a/blog/2022-09-16-learn-from-scale-practice/index.md +++ b/blog/2022-09-16-learn-from-scale-practice/index.md @@ -9,7 +9,7 @@ tags: [KusionStack, Kusion, Large-scale, Platform Engineering] **Abstract:** This blog attempts to talk about the challenges and best practices in the process of large-scale platform engineering from the perspectives of engineering, programing language, divide-and-conquer, modeling, automation, and collaborative culture. Hopefully, by sharing the concepts and practices of our platform engineering with more companies and teams, we can make some interesting changes happen together. -This blog is based on the platform engineering and automation practice of [KusionStack](https://kusionstack.io/docs/user_docs/intro/kusion-intro) in Ant Group. +This blog is based on the platform engineering and automation practice of [KusionStack](https://kusionstack.io/docs/user_docs/intro/overview) in Ant Group. ## 1. 
Platform Engineering: Making Enterprise DevOps Happen @@ -44,7 +44,7 @@ In fact, not everyone should be or could be an expert in this specific field, wh Compared with a domain language there's no better way for open, self-service, domain-oriented business problem definitions, as well as meeting the enterprise's internal requirements of automation, low-security-risk, low noise, and easy governance. Just as there are staves for recording music, and time-series databases for storing time-series data, within the problem domain of platform engineering, a set of configuration and policy languages ​​are created to write and manage configurations and policies at scale. Different from high-level general purpose languages ​​with mixed paradigms and engineering capabilities, the core logic of such domain languages ​​is to solve the near-infinite variation and complexity of domain problems with a convergent and limited set of syntax and semantics, and to integrate the ideas and methods of large-scale complex configuration and policies writing into language features. -In the platform engineering practice of Ant Group, we have strengthened the client-side working mode. We write and maintain the models, orchestration, constraints and policies around the application ops life cycle in the shared codebase [Konfig](https://github.com/KusionStack/konfig) through the record and functional language [KCL](https://github.com/KusionStack/KCLVM). KCL is a static and strongly typed language for application developers with programming ability, and provides the writing experience of a modern high-level language with limited functionality around domain purposes. Under such practices, KCL is not a language just for writing K-V pairs, but a language for platform engineering development. Application developers, SREs, and platform developers conduct dev collaboratively based on Konfig. They write configurations, and [schema](https://kusionstack.io/docs/reference/lang/lang/tour/#schema) abstractions, [functions](https://kusionstack.io/docs/reference/lang/lang/tour/#function), [constraints](https://kusionstack.io/docs/reference/lang/lang/tour/#validation) and [rules](https://kusionstack.io/docs/reference/lang/lang/tour/#rule) which are frequent and complex in the PaaS field through KCL native functions, that is, writing stable and scalable business models, business logic, error-proofing constraints, and environmental rules. The Konfig repository becomes a unified programming interface, workspace and business layer, while the KCL-oriented writing paradigm, which is secure and consistent, with low noise, low side effect and easy to automate, are more beneficial for long-term management and governance. +In the platform engineering practice of Ant Group, we have strengthened the client-side working mode. We write and maintain the models, orchestration, constraints and policies around the application ops life cycle in the shared codebase [Konfig](https://github.com/KusionStack/konfig) through the record and functional language [KCL](https://github.com/KusionStack/KCLVM). KCL is a static and strongly typed language for application developers with programming ability, and provides the writing experience of a modern high-level language with limited functionality around domain purposes. Under such practices, KCL is not a language just for writing K-V pairs, but a language for platform engineering development. Application developers, SREs, and platform developers conduct dev collaboratively based on Konfig. 
They write configurations, and [schema](https://kcl-lang.io/docs/reference/lang/tour/#schema) abstractions, [functions](https://kcl-lang.io/docs/reference/lang/tour/#function), [constraints](https://kcl-lang.io/docs/reference/lang/tour/#validation) and [rules](https://kcl-lang.io/docs/reference/lang/tour/#rule) which are frequent and complex in the PaaS field through KCL native functions, that is, writing stable and scalable business models, business logic, error-proofing constraints, and environmental rules. The Konfig repository becomes a unified programming interface, workspace and business layer, while the KCL-oriented writing paradigm, which is secure and consistent, with low noise, low side effect and easy to automate, are more beneficial for long-term management and governance. ![](/img/blog/2022-09-16-learn-from-scale-practice/kcl-dev.png) @@ -58,8 +58,8 @@ The idea of ​​divide and conquer is the key to solving the scaling problem, In Ant Group's practice, Konfig monorepo is the programming workspace opened by the internal engineering platform to developers, helping application developers to write configurations and policies around the application operation life cycle with a unified programming interface and tech stack, to integrate with existing and future platform and infrastructure, to create and manage cloud-native environments and RBAC-based permissions on demand, and to manage the delivery workflow through GitOps. Konfig monorepo provides an independent white-box programming space for different scenarios, projects and applications, whose intrinsic scalability comes from: - Flexible, scalable, independent client-side [engineering structure design](https://kusionstack.io/docs/user_docs/concepts/konfig) -- The [automatic merging](https://kusionstack.io/docs/reference/lang/lang/tour/#-operators-1) technology of isolated config blocks supports the arbitrary and scalable organization of config blocks -- [Static type system](https://kusionstack.io/docs/reference/lang/lang/tour/#type-system) technology provides reusable and scalable type-based modeling and constraints in a modern programming language manner +- The [automatic merging](https://kcl-lang.io/docs/reference/lang/tour/#operators) technology of isolated config blocks supports the arbitrary and scalable organization of config blocks +- [Static type system](https://kcl-lang.io/docs/reference/lang/tour/#type-system) technology provides reusable and scalable type-based modeling and constraints in a modern programming language manner - Project-grained GitOps CI workflow definition support - Provision technology selection based on [Kusion](https://github.com/KusionStack/kusion) engine @@ -83,8 +83,8 @@ The abstract implicit method is a usual choice for platform engineers for non-ex In Ant Group's practice, we adopt an abstract model for end-users, which refers to application developers, and solve several key problems through the following ideas: - Modeling for typical applications or scenarios (such as Ant Group's Sofa application), these models are developed by platform developers together with platform SREs and maintained together with application developers to achieve a balance between user experience, cost and standard compatibility. In Ant Group's practice, the information entropy convergence ratio of the abstract model is about 1:5, and the marginal benefit of modeling investment is guaranteed through extensive high-frequency usage. 
-- For non-typical user applications or scenarios, platform developers and platform SRE support application developers to design application models. Mechanisms such as KCL [schema](https://kusionstack.io/docs/reference/lang/lang/tour/#schema) and [mixin](https://kusionstack.io/docs/reference/lang/lang/tour#protocol--mixin) help users to model, abstract, inherit, combine, reuse, and reduce repetitive code. Such modeling design work is one of the key points in the field of application PaaS, and we need a more reasonable division of labor for such a scenario. Finally, a large number of "non-standard" platform applications were adopted and managed in an accordant way within Ant Group for the first time, which solved the long tail problem effectively. In a typical collaborative mode, platform developers and platform SREs write base components of platform capability, thus becoming “Enablers”, and helping application developers "build building blocks” quickly by using base components to complete their application models. -- For platform technology, we provide the [generation tool](https://kusionstack.io/docs/reference/cli/openapi/) from platform API Spec to KCL schema code, natively support the compile-time selection of different Kubernetes API versions through [combined compilation](https://kusionstack.io/docs/reference/lang/lang/tour/#multi-file-compilation), and solve the flexible requirements of mapping application models to different versions of Kubernetes clusters in internal practice. At the same time, KCL supports the writing of in-schema [constraints](https://kusionstack.io/docs/reference/lang/lang/tour/#validation) and independent environmental [rules](https://kusionstack.io/docs/reference/lang/lang/tour/#rule). In addition, KCL also provides the [deprecated decorator](https://kusionstack.io/docs/reference/lang/lang/tour/#decorators) to support the force deprecation of the model or model attribute. Through robust complete modeling and constraint mechanism on the client side, general problems such as configuration errors and schema drift are exposed at compile time. Due to the left-shifted problem found before runtime, runtime errors or failures are avoided while pushing to the cluster, which is also a necessary requirement for the stability of the production environment in the enterprise, especially in an enterprise with high-level risks. +- For non-typical user applications or scenarios, platform developers and platform SRE support application developers to design application models. Mechanisms such as KCL [schema](https://kcl-lang.io/docs/reference/lang/tour/#schema) and [mixin](https://kcl-lang.io/docs/reference/lang/tour/#protocol--mixin) help users to model, abstract, inherit, combine, reuse, and reduce repetitive code. Such modeling design work is one of the key points in the field of application PaaS, and we need a more reasonable division of labor for such a scenario. Finally, a large number of "non-standard" platform applications were adopted and managed in an accordant way within Ant Group for the first time, which solved the long tail problem effectively. In a typical collaborative mode, platform developers and platform SREs write base components of platform capability, thus becoming “Enablers”, and helping application developers "build building blocks” quickly by using base components to complete their application models. 
+- For platform technology, we provide the [generation tool](https://kusionstack.io/docs/reference/cli/openapi/) from platform API Spec to KCL schema code, natively support the compile-time selection of different Kubernetes API versions through [combined compilation](https://kcl-lang.io/docs/reference/lang/tour/#multi-file-compilation), and solve the flexible requirements of mapping application models to different versions of Kubernetes clusters in internal practice. At the same time, KCL supports the writing of in-schema [constraints](https://kcl-lang.io/docs/reference/lang/tour/#validation) and independent environmental [rules](https://kcl-lang.io/docs/reference/lang/tour/#rule). In addition, KCL also provides the [deprecated decorator](https://kcl-lang.io/docs/reference/lang/tour/#decorators) to support the force deprecation of the model or model attribute. Through robust complete modeling and constraint mechanism on the client side, general problems such as configuration errors and schema drift are exposed at compile time. Due to the left-shifted problem found before runtime, runtime errors or failures are avoided while pushing to the cluster, which is also a necessary requirement for the stability of the production environment in the enterprise, especially in an enterprise with high-level risks. Expert users of the underlying platform technology are usually very familiar with a specific technical domain and prefer to work in an explicit way that faces platform details. The KCL language provides the necessary dynamic and modular support and ensures stability through a static type system and constraint mechanisms. However, the explicit method cannot solve the problem that expert users are not familiar with the details of using cross-domain platform technologies, nor can it solve the problem of the scalability and complexity of platform technologies today. In Ant Group's small-scale YAML-based explicit engineering practice, facing a large number of highly open and configurable platform technologies, the complexity grows continuously with the utilization rate of platform technologies, and ends up in a rigid state that is hard to read, write, constrain, test, and maintain. @@ -93,7 +93,7 @@ Expert users of the underlying platform technology are usually very familiar wit Automation is a classic domain in the field of infrastructure operation. With the wide and rapid adoption of cloud-native concepts and technologies, the ability to automatically integrate turns out to be the basic requirement of enterprise operation practice. Open source, highly configurable CI and CD technologies are gradually adopted by enterprises. The black-box "product" approach that cannot be integrated is gradually weakened and replaced by a flexible orchestration approach. The main advantages of this practice lie in its powerful customize orchestration and linking capabilities, high scalability as well as good portability. Especially in the Kubernetes ecosystem, the GitOps method has a high adoption rate and a natural affinity with configurable CI and CD technologies. Such changes are also promoting the gradual transformation of the work order and product centric workflow into a self-service and engineering efficiency platform centric workflow, and the operational capability of the production environment has become an important part of the automatic workflow. In the open source community, the technology innovation of the abstraction layer for different engineering efficiency platforms is also active in progress. 
The developers on the platform side hope to get through the CI and CD process applied to the cloud environment through the shortest cognitive and practical path. -In Ant Group's engineering practice, the engineering efficiency platform is deeply involved in the open automation practice of Konfig monorepo, and our practice direction is also highly aligned with the technological roadmap of the engineering efficiency platform. In the collaborative work of several people to dozens or even hundreds of people, workflow design for operation scenarios, high-frequency code submission and pipeline execution, real-time automated testing and deployment pose several challenges to the engineering efficiency platform. Especially the diverse businesses in monorepo require independent and powerful workflow customization and operation support, as well as parallel workflow execution capabilities with high real-time and strong SLO guarantee. The requirements of the single-repository mode are hugely different. Most configuration languages are interpreted languages, while KCL is designed as a compiled language, implemented by Rust, C and LLVM optimizer, to provide [high-performance](https://kusionstack.io/blog/2022-declarative-config-overview#35-performance) compile-time and runtime execution for large-scale KCL files. At the same time, KCL can be compiled to native code and wasm binary to meet various runtimes execution requirements. In addition, the storage and architecture design of Git is different from the [Citc/Piper](https://cacm.acm.org/magazines/2016/7/204032-why-google-stores-billions-of-lines-of-code-in-a-single-repository/fulltext) architecture, and not suitable for monorepo of large-scale code. Fortunately, we have not encountered a big problem with the scale of code today. Meanwhile, we’re working together to solve problems and hope to solve them gradually as the practice deepens. +In Ant Group's engineering practice, the engineering efficiency platform is deeply involved in the open automation practice of Konfig monorepo, and our practice direction is also highly aligned with the technological roadmap of the engineering efficiency platform. In the collaborative work of several people to dozens or even hundreds of people, workflow design for operation scenarios, high-frequency code submission and pipeline execution, real-time automated testing and deployment pose several challenges to the engineering efficiency platform. Especially the diverse businesses in monorepo require independent and powerful workflow customization and operation support, as well as parallel workflow execution capabilities with high real-time and strong SLO guarantee. The requirements of the single-repository mode are hugely different. Most configuration languages are interpreted languages, while KCL is designed as a compiled language, implemented by Rust, C and LLVM optimizer, to provide [high-performance](https://kcl-lang.io/blog/2022-declarative-config-overview#35-performance) compile-time and runtime execution for large-scale KCL files. At the same time, KCL can be compiled to native code and wasm binary to meet various runtimes execution requirements. In addition, the storage and architecture design of Git is different from the [Citc/Piper](https://cacm.acm.org/magazines/2016/7/204032-why-google-stores-billions-of-lines-of-code-in-a-single-repository/fulltext) architecture, and not suitable for monorepo of large-scale code. Fortunately, we have not encountered a big problem with the scale of code today. 
Meanwhile, we’re working together to solve problems and hope to solve them gradually as the practice deepens. ## 6. Collaborative Culture: A More Important Thing @@ -111,14 +111,14 @@ Finally, I would like to talk about the next step. There’re still possibilitie ## 8. Reference -- [https://kusionstack.io/docs/user_docs/intro/kusion-intro](https://kusionstack.io/docs/user_docs/intro/kusion-intro) +- [https://kusionstack.io/docs/user_docs/intro/overview](https://kusionstack.io/docs/user_docs/intro/overview) - [https://platformengineering.org/blog/what-is-platform-engineering](https://platformengineering.org/blog/what-is-platform-engineering) - [https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/](https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/) - [https://web.devopstopologies.com/#anti-types](https://web.devopstopologies.com/#anti-types) - [https://github.com/KusionStack/kusion](https://github.com/KusionStack/kusion) - [https://github.com/KusionStack/KCLVM](https://github.com/KusionStack/KCLVM) -- [https://kusionstack.io/docs/reference/lang/lang/tour](https://kusionstack.io/docs/reference/lang/lang/tour/#%E9%85%8D%E7%BD%AE%E6%93%8D%E4%BD%9C) +- [https://kcl-lang.io/](https://kcl-lang.io/docs/reference/lang/tour/#%E9%85%8D%E7%BD%AE%E6%93%8D%E4%BD%9C) - [https://kusionstack.io/docs/user_docs/concepts/konfig](https://kusionstack.io/docs/user_docs/concepts/konfig) -- [https://kusionstack.io/blog/2022-declarative-config-overview#35-performance](https://kusionstack.io/blog/2022-declarative-config-overview#35-performance) +- [https://kcl-lang.io/blog/2022-declarative-config-overview#35-performance](https://kcl-lang.io/blog/2022-declarative-config-overview#35-performance) - [https://cacm.acm.org/magazines/2016/7/204032-why-google-stores-billions-of-lines-of-code-in-a-single-repository/fulltext](https://cacm.acm.org/magazines/2016/7/204032-why-google-stores-billions-of-lines-of-code-in-a-single-repository/fulltext) diff --git a/blog/2022-11-24-kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era/index.md b/blog/2022-11-24-kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era/index.md new file mode 100644 index 00000000..bd3ad1c4 --- /dev/null +++ b/blog/2022-11-24-kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era/index.md @@ -0,0 +1,13 @@ +--- +slug: 2022-kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era +title: KusionStack:Application Scale Operation Solution in the "Post CloudNative" Era +authors: + name: Dayuan Li + title: Kusion Creator +tags: [KusionStack, Kusion] + +--- + +[![KusionStack: Application Scale Operation Solution in the "Post CloudNative" Era](/talks/kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era.png)](https://github.com/KusionStack/community/raw/main/2022/talkgo/kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era.pdf) + +[Download PDF](https://github.com/KusionStack/community/raw/main/2022/talkgo/kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era.pdf) diff --git a/blog/2022-12-12-post-cloud-native-era-operation/index.md b/blog/2022-12-12-post-cloud-native-era-operation/index.md new file mode 100644 index 00000000..2da769d3 --- /dev/null +++ b/blog/2022-12-12-post-cloud-native-era-operation/index.md @@ -0,0 +1,101 @@ +--- +slug: 2022-post-cloud-native-era-operation +title: How to scale operation in Post Cloud Native Era? 
+authors: + name: Dayuan Li + title: Kusion Creator +tags: [KusionStack, Kusion] +--- + +# Post Cloud Native Era +It has been more than eight years since the first commit of Kubernetes, and the cloud-native technology it represents is no longer new but a "default option" for modern applications. The services that modern applications rely on go far beyond Kubernetes. A slightly more complex application often uses heterogeneous infrastructures such as cloud-native technologies from the Kubernetes ecosystem, IaaS cloud services, and internal self-built systems, and multi-cloud and hybrid-cloud deployments are also usually required. We have entered the "Post cloud-native Era", and operation tools that target only Kubernetes can no longer meet our demands. +

+ +

+ +What's more complicated is that, within the enterprise, different teams generally maintain these services. A large-scale operation requires the cooperation of members of multiple teams. However, there is a lack of efficient communication and collaborative approach among App Dev, Platform Dev, and SRE teams. The complexity of technology and inefficient teamwork has exponentially increased the difficulty of large-scale operation and maintenance in the "Post cloud-native Era". + +# The problem of large-scale operations has always existed +The large-scale operation of complex heterogeneous infrastructure is not a unique problem in the post cloud-native era. It has always been a problem since the birth of distributed systems, but it has become more difficult in the post cloud-native era. The industry proposed the DevOps concept more than ten years ago. Countless companies have built their DevOps platforms based on this concept, hoping to solve this problem, but the actual implementation process is often unsatisfactory. How to cooperate between the Dev team and the Ops team? How are responsibilities divided? How can a platform team of dozens of people support the operation demands of tens of thousands of engineers? The underlying infrastructure is complex and diverse, and capabilities change with each passing day. How to quickly help front-line Devs get technological advantages? These problems still need to be resolved. Recently, some people have suggested that DevOps is dead and Platform Engineering is the future. Regardless of the concept definition, whether DevOps or Platform Engineering, they are essentially different concepts under the same proposition of large-scale operation in enterprises. What we need more is a solution that conforms to the trend of technological development and can solve current problems. + +# Legacy architecture is no longer applicable +In traditional operation and maintenance thinking, the solution to the above problems is generally to build a PaaS platform, such as our early AntGroup PaaS platform, a web console with a UI interface. Users (usually App Dev or SRE) can accomplish operations such as deploying, restarting, scaling, and so on through UI interactions. In terms of technical implementation, the system mainly contains three parts, a frontend system that provides user interactions regarded as the system entrance; a backend system in the middle that connects to various infrastructures; the bottom layer is the APIs of multiple infrastructures. This architecture has been running for nearly ten years and has been running very well. It has a user-friendly interface and can shield the complexity of the infrastructure, and the responsibilities of each team are clearly defined. However, in the post cloud-native era, this architecture is no longer applicable, exposing two fatal flaws, **"manpower-consuming" and "time-consuming"**. +

+ +

+ +To give a typical example, the network team has developed a new load-balancing algorithm for its Loadbalancer, which needs to be provided to users. Under the above architecture, the entire workflow looks like this: + +1. The network team develops the new load-balancing algorithm and provides APIs +2. The PaaS backend codes against the underlying APIs to interconnect the various infrastructures and shield their complexity, and abstracts higher-level APIs for users +3. The PaaS frontend modifies the UI for the new feature and uses the backend APIs to provide the new load-balancing algorithm to end users + +There is a problem here. Even a tiny feature requires the PaaS backend and frontend to modify their code. The process takes a week to go online at the fastest, and the more infrastructure teams involved, the lower the efficiency. That was not a problem ten years ago, but it is a big problem today. A modern post cloud-native era application relying on three cloud-native technologies (Kubernetes + Istio + Prometheus), two cloud services (Loadbalancer + Database), and a self-built internal service is already commonplace, and complex applications will rely on even more. If every infrastructure is hard-coded by the PaaS team, expanding the PaaS team by ten times will not be enough. +

+ +

+ +After talking about "manpower-consuming", let's look at the problem of "time-consuming". A minor feature in the above example requires two cross-team collaborations. The first collaboration is between the infrastructure team and the PaaS backend team, and the second is between the PaaS backend team and the PaaS frontend team. Teamwork is a complicated problem, sometimes more complicated than the technology itself. If you want to accomplish a large-scale operation covering 100 applications at a time, how many teams do you need to communicate and collaborate with? How much time will it take? Without suitable coordination mechanisms, this becomes an impossible task. + +# Explore and practice +We have been exploring within Ant Group for nearly two years. We have practiced common tools such as kustomize, helm, argoCD, and Terraform and even developed some auxiliary systems for some of these tools, but the results were unsatisfactory. Some of these tools are too limited to the Kubernetes ecosystem to operate other types of infrastructure. The others support heterogeneous infrastructure but are not friendly to the Kubernetes ecosystem and cannot take advantage of cloud-native technologies. More importantly, upgrading operation tools has hardly improved teamwork efficiency, and we need a more systematic solution. +Going back to the question itself, we propose two ideas for the problems of "manpower-consuming" and "time-consuming": + + +1. Is it possible for App Dev to use the various interconnected infrastructures efficiently and in a self-service way, instead of going through PaaS as an intermediary? +2. Is it possible to build a centralized collaboration platform that uses technical means to regulate everyone's behavior and standardize communication? + +From a technical point of view, the PaaS platform must provide flexible toolchains and workflows. All capabilities of the infrastructure are exposed in a modular manner. App Dev combines and orchestrates these basic platform capabilities to solve their own problems, and the process does not require the participation of the platform team. All teams involved use a unified language and interface to communicate, without manual hand-offs in the entire process. + +## Our practice +![](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/arch.png) + +After nearly two years of exploration and practice on Ant Group's internal PaaS platform, we distilled a complete end-to-end solution named [KusionStack](https://github.com/KusionStack/kusion), which is now open source. KusionStack is designed to solve the traditional PaaS "manpower-consuming" and "time-consuming" problems from the perspective of unified heterogeneous infrastructure operation and team collaboration. The whole system mainly contains three parts: + + +1. [Konfig](https://github.com/KusionStack/konfig): a monorepo acting as a centralized platform for multi-team collaboration, storing the operation intentions of each team. +2. [KCL](https://github.com/KusionStack/KCLVM): a self-developed configuration DSL. It is the tool all teams use to communicate with each other. +3. [Kusion](https://github.com/KusionStack/kusion): KusionStack's engine, responsible for all operations. + +Platform Dev defines the basic capability model through KCL, and App Dev reuses these predefined capabilities in the application configuration model (AppConfig) through language features such as import and mixin. Users can quickly describe operation intentions in Konfig.
AppConfig is a well-designed model that only exposes the attributes App Dev needs to care about, shielding the complexity of the infrastructure. + +Never underestimate the professionalism and complexity of infrastructures. Even Kubernetes, which has become the standard of cloud-native technology, still has a high threshold for ordinary users. A Kubernetes Deployment has dozens of fields, let alone custom labels and annotations; ordinary users cannot understand them all. In other words, App Dev should not have to understand Kubernetes: all they need to do is release, and they do not even need to care whether the underlying infrastructure is Kubernetes. + +AppConfig generates multiple heterogeneous infrastructure resources after compilation and transfers these resources to the KusionStack engine through CI, CLI, GUI, etc. The engine is the core of KusionStack, responsible for all operations, and makes the operation intentions take effect on the infrastructure. It operates heterogeneous infrastructure in a unified way and performs a series of procedures on these resources, such as verification, orchestration, preview, validation, observation, and health checks. + + +It is worth mentioning that the whole process is very friendly to Kubernetes resources. Due to the Kubernetes reconciliation mechanism, the success of the apply command does not mean that resources are available; applications need to wait for resources to be reconciled successfully. If the reconciliation fails, we need to log in to the cluster and check the specific error message through commands like get, describe, and logs. The whole process is very cumbersome. We have simplified these operations through technical means and show the important messages during reconciliation in a user-friendly way. The animation below is a simple example. After the command is invoked, you can clearly see the reconciliation process of all resources and their associated resources until the resources are actually available. +
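As a rough sketch of the command-line flow being described (the `compile` invocation mirrors the one referenced elsewhere in this changeset; `apply` is the command whose reconciliation behavior is described above; the project/stack path and the flag on `apply` are assumptions, not a fixed recipe):

```bash
# Render the AppConfig of a hypothetical project/stack into raw resources
# (Kubernetes manifests, Terraform resources, ...) and print them.
kusion compile -w path/to/project/stack -o stdout

# Hand the rendered resources to the engine: it previews the changes, applies
# them, and then watches reconciliation until the resources are actually available.
kusion apply -w path/to/project/stack
```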

+ +

+ +The whole system has the following characteristics: +1. Application-centric + - Comprehensive application configuration management, including all application-related configurations such as computing, network, and storage + - Application life cycle management, from the first line of configuration code to production availability +2. Unified operation of heterogeneous infrastructure for applications in the post cloud-native era + - Kubernetes-friendly workflow, providing high-level capabilities such as observability and health checks for Kubernetes resources and unlocking the benefits of cloud-native technologies + - Reuse of the Terraform ecosystem: multi-runtime resources such as Kubernetes and Terraform are operated and maintained through a unified workflow +3. Large-scale collaborative platform + - Flexible workflows: users can combine and orchestrate the platform's basic capabilities themselves to solve their own problems + - Separation of concerns between App Dev and Platform Dev: infrastructure feature iteration does not require platform intervention and can be used directly by App Dev + - A pure client-side solution that "shifts left" risks, so problems can be detected as early as possible + +# It is just the beginning +After nearly two years of exploration, this system has been widely used in Ant Group's multi-cloud application delivery, computing and data infrastructure delivery, database operation, and other business fields. Currently, 400+ developers have directly contributed to the Konfig monorepo, with a total of nearly 800K commits, most of which are automated code modifications; on average, 1K pipeline tasks and about 10K KCL compilations are executed per day. After compiling the Konfig monorepo, 3M+ lines of YAML text can be generated. + +However, all this has just begun, and the post cloud-native era has just arrived. Our purpose in open-sourcing this system is also to invite all parties in the industry to build a solution that can truly solve the large-scale operation problems enterprises face today. Ant Group's PaaS team still has a lot of accumulated technology that has been verified in internal scenarios, and it will be open sourced in the future. What we have done is far from enough, and we sincerely invite everyone to join us and build it together.
+ +# Ref +Github: Welcome to give a Star⭐️ + +- [https://github.com/KusionStack/kusion](https://github.com/KusionStack/kusion) +- [https://github.com/KusionStack/KCLVM](https://github.com/KusionStack/KCLVM) +- [https://github.com/KusionStack/konfig](https://github.com/KusionStack/konfig) + +Website:[https://kusionstack.io](https://kusionstack.io/) + +PPT:[KusionStack: Application Scale Operation Solution in the "Post CloudNative" Era](https://kusionstack.io/blog/2022-kusionstack-application-scale-operation-solution-in-the-post-cloud-native-era) diff --git a/blog/2023-05-26-qcon-guangzhou/index.md b/blog/2023-05-26-qcon-guangzhou/index.md new file mode 100644 index 00000000..66f15f95 --- /dev/null +++ b/blog/2023-05-26-qcon-guangzhou/index.md @@ -0,0 +1,12 @@ +--- +slug: 2023-05-26-qcon-guangzhou +title: Ant Group's Platform Engineering Practice at Scale +authors: + name: Dayuan Li + title: Kusion Creator +tags: [KusionStack, Kusion] +--- + +[![Ant Group's Platform Engineering Practice at Scale](/talks/qcon-cover-en.jpg)](https://github.com/KusionStack/community/raw/main/2023/qcon-guangzhou/Ant-Groups-Platform-Engineering-Practice-at-Scale-en.pdf) + +[Download PPT](https://github.com/KusionStack/community/raw/main/2023/qcon-guangzhou/Ant-Groups-Platform-Engineering-Practice-at-Scale-en.pdf) diff --git a/ctrlmesh_versioned_docs/version-v0.1/_config/_category_.json b/ctrlmesh_versioned_docs/version-v0.1/_config/_category_.json new file mode 100644 index 00000000..1369e116 --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/_config/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Configuration", + "position": 5 +} diff --git a/ctrlmesh_versioned_docs/version-v0.1/_config/circuitbreaker.md b/ctrlmesh_versioned_docs/version-v0.1/_config/circuitbreaker.md new file mode 100644 index 00000000..830d1671 --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/_config/circuitbreaker.md @@ -0,0 +1,51 @@ +--- +sidebar_position: 3 +--- +# CircuitBreaker + +```yaml +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: CircuitBreaker +metadata: + name: demo + namespace: default +spec: + rateLimitings: + - bucket: + burst: 500 + interval: 1s + limit: 20 + name: deletePod + properties: + sleepingWindowSize: 15m + recoverPolicy: SleepingWindow + resourceRules: + - apiGroups: + - "" + namespaces: + - '*' + resources: + - pods + verbs: + - delete + triggerPolicy: Normal + - bucket: + burst: 200 + interval: 1s + limit: 20 + name: trafficOffLimit + restRules: + - method: POST + url: https://*.com/*/trafficOff + triggerPolicy: LimiterOnly + trafficInterceptRules: + - contents: + - .*(127.0.0.1).* + interceptType: White + methods: + - POST + - GET + - PUT + name: internalOnly + +``` \ No newline at end of file diff --git a/ctrlmesh_versioned_docs/version-v0.1/_config/shardingconfig.md b/ctrlmesh_versioned_docs/version-v0.1/_config/shardingconfig.md new file mode 100644 index 00000000..6130c389 --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/_config/shardingconfig.md @@ -0,0 +1,35 @@ +--- +sidebar_position: 1 +--- +# ShardingConfig + +## Auto Sharding +```yaml +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: ShardingConfig +metadata: + name: sharding-root + namespace: default +spec: + root: + prefix: demo + targetStatefulSet: operator-demo + canary: + replicas: 1 + inNamespaces: + - ns-canary + auto: + everyShardReplicas: 2 + shardingSize: 2 + resourceSelector: + - relatedResources: + - apiGroups: + - '*' + resources: + - pods + controller: + leaderElectionName: operator-leader + webhook: + certDir: 
/etc/kubernetes/webhook-cert/ + port: 9443 ``` \ No newline at end of file diff --git a/ctrlmesh_versioned_docs/version-v0.1/concepts/_category_.json b/ctrlmesh_versioned_docs/version-v0.1/concepts/_category_.json new file mode 100644 index 00000000..50a638da --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 2 +} diff --git a/ctrlmesh_versioned_docs/version-v0.1/concepts/concepts.md b/ctrlmesh_versioned_docs/version-v0.1/concepts/concepts.md new file mode 100644 index 00000000..b474cfe6 --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/concepts/concepts.md @@ -0,0 +1,78 @@ +# Concepts + +Generally, a `ctrlmesh-proxy` container will be injected into each operator Pod that is configured in a ShardingConfig. +This proxy container intercepts and handles the connections between the API Server (or other servers) and the controllers/webhooks in the Pod. + +
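As a quick, hedged way to confirm the injection described here (the namespace and the `enable-proxy` label value are taken from the sample walkthrough later in this changeset; the jsonpath expression is just one way to list container names):

```bash
# List operator pods that opted in to proxy injection via the enable-proxy label.
kubectl get pods -n kusionstack-sample -l ctrlmesh.kusionstack.io/enable-proxy=true

# Print the container names of one such pod; after injection it should show the
# operator's own container plus the ctrlmesh-proxy sidecar.
kubectl get pod <pod-name> -n kusionstack-sample -o jsonpath='{.spec.containers[*].name}'
```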

+

+ + +ApiServer proxy method: +- *iptables nat*: +- *fake kubeconfig*: + +The `ctrlmesh-manager` dispatches rules to the proxies, so that they can route requests according to the rules. + + +A core CRD of ControllerMesh is `ShardingConfig`. It contains all rules for user's controller: + +```yaml +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: ShardingConfig +metadata: + name: sharding-demo + namespace: operator-demo +spec: + controller: + leaderElectionName: operator-leader + webhook: + certDir: /tmp/webhook-certs + port: 9443 + limits: + - relateResources: + - apiGroups: + - '*' + resources: + - pods + - services + selector: + matchExpressions: + - key: ctrlmesh.kusionstack.io/namespace + operator: In + values: + - ns-a + - ns-b + matchLabels: + app: foo + selector: + matchExpressions: + - key: statefulset.kubernetes.io/pod-name + operator: In + values: + - operator-demo-0 +``` + +- selector: for all pods under a shard. It can be a subset of pods under a StatefulSet. +- controller: configuration for controller, including leader election name +- webhook: configuration for webhook, including certDir and port of this webhook +- limits: shard isolation is achieved through a set of `ObjectSelector`. + +When `manager` is first launched, shard labels will be added to all configured resources. + +- `ctrlmesh.kusionstack.io/sharding-hash`: the hash value calculated based on the namespace ranges from 0 to 31. +- `ctrlmesh.kusionstack.io/namespace`: the namespace referring to this resource. +- `ctrlmesh.kusionstack.io/control`: under ctrlmesh-manager control. + + +In this repo, we only support `ObjectSelector` type of flow control, +which means the `ctrlmesh-proxy `will proxy http/s requests to the ApiServer, +and inject a `LabelSelector` into the request param for the requested resource type. + + + + +Router: +

+

\ No newline at end of file diff --git a/docs/user_docs/support/_category_.json b/ctrlmesh_versioned_docs/version-v0.1/faq/_category_.json similarity index 100% rename from docs/user_docs/support/_category_.json rename to ctrlmesh_versioned_docs/version-v0.1/faq/_category_.json diff --git a/ctrlmesh_versioned_docs/version-v0.1/faq/faq.md b/ctrlmesh_versioned_docs/version-v0.1/faq/faq.md new file mode 100644 index 00000000..6c650fc7 --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/faq/faq.md @@ -0,0 +1,5 @@ +--- +sidebar_position: 1 +--- + +# FAQ \ No newline at end of file diff --git a/ctrlmesh_versioned_docs/version-v0.1/images/fake-configmap.png b/ctrlmesh_versioned_docs/version-v0.1/images/fake-configmap.png new file mode 100644 index 00000000..e47c1f84 Binary files /dev/null and b/ctrlmesh_versioned_docs/version-v0.1/images/fake-configmap.png differ diff --git a/ctrlmesh_versioned_docs/version-v0.1/images/mesh-arch-2.png b/ctrlmesh_versioned_docs/version-v0.1/images/mesh-arch-2.png new file mode 100644 index 00000000..ce05d7ab Binary files /dev/null and b/ctrlmesh_versioned_docs/version-v0.1/images/mesh-arch-2.png differ diff --git a/ctrlmesh_versioned_docs/version-v0.1/images/mesh-arch.png b/ctrlmesh_versioned_docs/version-v0.1/images/mesh-arch.png new file mode 100644 index 00000000..3f94003a Binary files /dev/null and b/ctrlmesh_versioned_docs/version-v0.1/images/mesh-arch.png differ diff --git a/ctrlmesh_versioned_docs/version-v0.1/images/mesh-proxy.png b/ctrlmesh_versioned_docs/version-v0.1/images/mesh-proxy.png new file mode 100644 index 00000000..c4e7a6a6 Binary files /dev/null and b/ctrlmesh_versioned_docs/version-v0.1/images/mesh-proxy.png differ diff --git a/ctrlmesh_versioned_docs/version-v0.1/images/sharding.png b/ctrlmesh_versioned_docs/version-v0.1/images/sharding.png new file mode 100644 index 00000000..decbec69 Binary files /dev/null and b/ctrlmesh_versioned_docs/version-v0.1/images/sharding.png differ diff --git a/ctrlmesh_versioned_docs/version-v0.1/intro/_category_.json b/ctrlmesh_versioned_docs/version-v0.1/intro/_category_.json new file mode 100644 index 00000000..fa1c06ac --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/intro/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Introduction", + "position": 1 +} diff --git a/ctrlmesh_versioned_docs/version-v0.1/intro/intro.md b/ctrlmesh_versioned_docs/version-v0.1/intro/intro.md new file mode 100644 index 00000000..ab974f1b --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/intro/intro.md @@ -0,0 +1,21 @@ + +# Controller Mesh + +KusionStack Controller Mesh is a solution that helps developers managing their controllers/operators better. + +## Key Features + +1. **Sharding**: Through relevant configurations, Kubernetes single-point deployed operator applications can be flexibly shard deployed. +2. **Canary upgrade**: Depends on sharding, the controller instances can be updated in canary progress instead of updated in one time. +3. **Circuit breaker and rate limiter**: Not only Kubernetes operation requests, but also other external operation requests. +4. **Multicluster routing and sharding** +5. **And more**: Fault injection and Observability (Todo). + + +## Architecture +

+

+ +Visit [Installation](./../started/install.md) or [Quick Start](./../started/try.md). + diff --git a/ctrlmesh_versioned_docs/version-v0.1/started/_category_.json b/ctrlmesh_versioned_docs/version-v0.1/started/_category_.json new file mode 100644 index 00000000..c1998c5e --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 3 +} diff --git a/ctrlmesh_versioned_docs/version-v0.1/started/install.md b/ctrlmesh_versioned_docs/version-v0.1/started/install.md new file mode 100644 index 00000000..e2b01f1c --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/started/install.md @@ -0,0 +1,68 @@ +--- +sidebar_position: 1 +--- +# Installation + +## Install with helm +Controller Mesh requires **Kubernetes version >= 1.18** +```shell +# Firstly add charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install ctrlmesh kusionstack/ctrlmesh + +# Upgrade to the latest version +$ helm upgrade ctrlmesh kusionstack/ctrlmesh + +# Uninstall +$ helm uninstall ctrlmesh +``` +[Helm](https://github.com/helm/helm) is a tool for managing packages of pre-configured Kubernetes resources. +### Optional: chart parameters + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|-------------------------------------|-------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `namespace` | namespace for controller mesh installation | `ctrlmesh` | +| `namespaceEnabled` | Whether to create the installation.namespace | `true` | +| `manager.replicas` | Replicas of ctrlmesh-manager deployment | `2` | +| `manager.image.repo` | Repository for ctrlmesh-manager image | `kusionstack/ctrlmesh-manager` | +| `manager.image.pullPolicy` | Image pull policy for ctrlmesh-manager | `IfNotPresent` | +| `manager.image.tag` | Tag for ctrlmesh-manager | `v0.1.0` | +| `manager.resources.limits.cpu` | CPU resource limit of ctrlmesh-manager container | `500m` | +| `manager.resources.limits.memory` | Memory resource limit of ctrlmesh-manager container | `512Mi` | +| `manager.resources.requests.cpu` | CPU resource request of ctrlmesh-manager container | `10m` | +| `manager.resources.requests.memory` | Memory resource request of ctrlmesh-manager container | `64Mi` | +| `proxy.image.repo` | Repository for ctrlmesh-proxy image | `kusionstack/ctrlmesh-proxy` | +| `proxy.image.pullPolicy` | Image pull policy for ctrlmesh-proxy | `IfNotPresent` | +| `proxy.image.tag` | Tag for ctrlmesh-proxy | `v0.1.0` | +| `proxy.resources.limits.cpu` | CPU resource requests of ctrlmesh-proxy container | `100m` | +| `proxy.resources.limits.memory` | Memory resource requests of ctrlmesh-proxy container | `100Mi` | +| `init.image.repo` | Repository for ctrlmesh-init image | `kusionstack/ctrlmesh-init` | +| `init.image.tag` | Tag for ctrlmesh-init | `v0.1.0` | +| `shardingGroupVersionKinds` | Sharding resource lists(yaml) | | + +config `groupVersionKinds` in file: +```yaml +ctrlmesh.kusionstack.io/v1alpha1: +- '*' +v1: +- Pod +- PersistentVolumeClaim +- Service +- ConfigMap +- Endpoint 
+apps/v1: +- StatefulSet +- ReplicaSet +- ControllerRevision +``` + +Specify each parameter using the `--set key=value` argument to `helm install` or `helm upgrade`. + + diff --git a/ctrlmesh_versioned_docs/version-v0.1/started/try.md b/ctrlmesh_versioned_docs/version-v0.1/started/try.md new file mode 100644 index 00000000..74dc19ee --- /dev/null +++ b/ctrlmesh_versioned_docs/version-v0.1/started/try.md @@ -0,0 +1,281 @@ +--- +sidebar_position: 4 +--- +# Try a Sample +This guide lets you quickly evaluate KusionStack Controller Mesh. + + +## Install Controller Mesh Manager +Controller Mesh requires **Kubernetes version >= 1.18** + +**Install with helm** +```bash +# Firstly add KusionStack charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install ctrlmesh kusionstack/ctrlmesh + +# Wait manager ready +$ kubectl -n ctrlmesh get po +NAME READY STATUS RESTARTS AGE +ctrlmesh-57d6b4df57-mdslc 1/1 Running 0 40s +ctrlmesh-57d6b4df57-mtv2s 1/1 Running 0 40s +``` +[Install manager with more options](install.md) + + +## Try with sample-operator + +Here is an example of a `Deployment` enabling sharding. +### Get and deploy sample-operator v0 +👉 [sample-operator repo](https://github.com/KusionStack/controller-mesh/tree/sample-operator) +```bash +# Clone and checkout branch sample-operator. +$ git clone -b sample-operator https://github.com/KusionStack/controller-mesh.git +$ cd sample + +# Make sure you have kind or test cluster, and kubectl is available. + +# Deploy default sample-operator v0. +$ IMAGE_TAG=v0.1.0 make deploy + +namespace/kusionstack-sample created +serviceaccount/kusionstack-controller-manager created +role.rbac.authorization.k8s.io/kusionstack-leader-election-role created +clusterrole.rbac.authorization.k8s.io/kusionstack-manager-role created +rolebinding.rbac.authorization.k8s.io/kusionstack-leader-election-rolebinding created +clusterrolebinding.rbac.authorization.k8s.io/kusionstack-sample-manager-rolebinding created +deployment.apps/kusionstack-sample-operator-v0 created + +# kusionstack-sample-operator-v0 is created. +$ kubectl get deploy -n kusionstack-sample +NAME READY UP-TO-DATE AVAILABLE AGE +kusionstack-sample-operator-v0 2/2 2 2 14s + +$ kubectl get po -n kusionstack-sample +NAME READY STATUS RESTARTS AGE +kusionstack-sample-operator-v0-66f7595c7b-n4c47 1/1 Running 0 50s +kusionstack-sample-operator-v0-66f7595c7b-wxwtv 1/1 Running 0 50s + +# sample-operator uses leader-election. Only one leader pod reconciling. +$ kubectl -n kusionstack-sample get lease +NAME HOLDER AGE +sample-operator-leader kusionstack-sample-operator-v0-66f7595c7b-wxwtv_c0ed684d-f332-47f6-890c-dd7e489486f2 53 +``` +### Play with ShardingConfig + +By configuring `ShardingConfig` appropriately, you can achieve canary and sharding deploy. + +**Isolate canary namespaces** +```bash +# Create some test namespaces([foo-01, foo-02, ..., foo-31]). +$ chmod +x ./scripts/create-ns-foo.sh && ./scripts/create-ns-foo.sh + +# All namespaces are controlled by sample-operator v0. +$ kubectl get ns -l sample.kusionstack.io/control-by=kusionstack-sample-operator-v0-66f7595c7b-wxwtv +NAME STATUS AGE +default Active 12d +foo-01 Active 78s +foo-02 Active 78s +foo-03 Active 78s +... ... ... +foo-32 Active 78s + +# There are more details in leader pod log. 
+$ kubectl logs kusionstack-sample-operator-v0-66f7595c7b-wxwtv -n kusionstack-sample | grep "hold namespaces" +I0110 09:32:50.950535 1 runner.go:101] hold namespaces [ctrlmesh default foo-01 foo-02 foo-03 foo-04 foo-05 foo-06 foo-07 foo-08 foo-09 foo-10 foo-11 foo-12 foo-13 foo-14 foo-15 foo-16 foo-17 foo-18 foo-19 foo-20 foo-21 foo-22 foo-23 foo-24 foo-25 foo-26 foo-27 foo-28 foo-29 foo-30 foo-31 foo-32 kusionstack-sample kusionstack-system local-path-storage] + +# Apply sample ShardingConfigs +$ ./bin/kustomize build config/shardingconfig/canary | kubectl apply -f - +shardingconfig.ctrlmesh.kusionstack.io/kusionstack-sample-operator-0-canary created +shardingconfig.ctrlmesh.kusionstack.io/kusionstack-sample-operator-1-normal created +``` + +export const Highlight = ({children, color}) => ( + + {children} + +); + +The [kusionstack-sample-operator-0-canary](https://github.com/KusionStack/controller-mesh/blob/sample-operator/sample/config/shardingconfig/canary/shardingconfig-canary.yaml) has restricted the scope of namespaces [foo-01, foo-02, foo-03] reconciled by version `v1`. +And [kusionstack-sample-operator-1-normal](https://github.com/KusionStack/controller-mesh/blob/sample-operator/sample/config/shardingconfig/canary/shardingconfig-normal.yaml) decided that other namespaces will be reconciled by version `v0`. +```bash +# Patch labels to pod template to inject sidecar and ShardingConfig +$ kubectl -n kusionstack-sample patch deployment kusionstack-sample-operator-v0 --type=strategic --patch \ + 'spec: + template: + metadata: + labels: + ctrlmesh.kusionstack.io/enable-proxy: "true" + ctrlmesh.kusionstack.io/watching: "true"' + +# Mesh proxy container was injected +$ kubectl get po -n kusionstack-sample +NAME READY STATUS RESTARTS AGE +kusionstack-sample-operator-v0-6944bb4bf5-gclqq 2/2 Running 0 30s +kusionstack-sample-operator-v0-6944bb4bf5-lfwdb 2/2 Running 0 41s + +# Find current leader +# sharding lease format: ${leader-election-name}---${shardingconfig-name} +$ kubectl get lease -n kusionstack-sample +NAME HOLDER AGE +sample-operator-leader---kusionstack-sample-operator-1-normal kusionstack-sample-operator-v0-6944bb4bf5-lfwdb_497a7962-a5f1-465e-b8ef-6e35660c63f4 32s + +# Namespaces [foo-1, foo-2, foo-3] are no longer under v0 control. +$ kubectl logs kusionstack-sample-operator-v0-6944bb4bf5-lfwdb -c manager -n kusionstack-sample | grep "namespaces" + ... hold namespaces [default foo-04 foo-05 ... 
foo-32] + +``` + +**Deploy canary sample-operator v1** + +```bash +# Apply sample operator v1 which deployment already labeled +$ ./bin/kustomize build config/manager-v1 | kubectl apply -f - +deployment.apps/kusionstack-sample-operator-v1 created + +# Two pods created +$ kubectl get po -n kusionstack-sample +NAME READY STATUS RESTARTS AGE +kusionstack-sample-operator-v0-6944bb4bf5-gclqq 2/2 Running 0 4m +kusionstack-sample-operator-v0-6944bb4bf5-lfwdb 2/2 Running 0 4m +kusionstack-sample-operator-v1-7b6bbb49c8-kbgww 0/2 ContainerCreating 0 3s +kusionstack-sample-operator-v1-7b6bbb49c8-qbzjj 0/2 ContainerCreating 0 3s + +# The canary shard uses a separate lease +$ kubectl get lease -n kusionstack-sample +NAME HOLDER AGE +sample-operator-leader---kusionstack-sample-operator-0-canary kusionstack-sample-operator-v1-7b6bbb49c8-qbzjj_64272983-c59a-4574-933d-7d5fea7a1e35 15s +sample-operator-leader---kusionstack-sample-operator-1-normal kusionstack-sample-operator-v0-6944bb4bf5-lfwdb_497a7962-a5f1-465e-b8ef-6e35660c63f4 4m + +# Only foo-01, foo-02, foo-03 controlled by v1 +$ kubectl get ns -l sample.kusionstack.io/control-by=v1 -n kusionstack-sample +NAME STATUS AGE +foo-01 Active 4m +foo-02 Active 4m +foo-03 Active 4m + +$ kubectl logs kusionstack-sample-operator-v1-7b6bbb49c8-qbzjj -c manager -c kusionstack-sample| grep namespaces + ... hold namespaces [foo-01 foo-02 foo-03] +``` +Similarly, if you want to have more shards, you need to do the following steps: +1. Extract a portion of the namespace from the existing ShardingConfigs. +2. Configure a new ShardingConfig and apply it. +3. Recreate or restart the existing pods to make the new ShardingConfig take effect. +4. Scale out the Pods for the new ShardingConfig. + + +### Clear sample resources + +```bash +$ chmod +x ./scripts/clear.sh && ./scripts/clear.sh +``` + + +:::tip +**Beta**: *We try to support automatic sharding strategy. With automatic sharding configuration, there is no need to manually configure each shard's configuration. It manages multiple sub-shardingconfigs automatically through a root configuration.* +::: + + + +## Try with Operating + +For `StatefulSet` case, you can use the **[Operating v0.1.1](https://kusionstack.io/docs/operating/introduction/)** available here. + +Deploy the sample operator with ShardingConfig: + +```bash +$ helm repo update +$ helm install sample-operating kusionstack/operating \ + --version v0.2.0 \ + --set sharding.enabled=true \ + --set sharding.isDemo=true + +$ kubectl -n kusionstack-system get sts +NAME READY AGE +kusionstack-operating 5/5 1m45s + +# The proxy container will be automatically injected into the pod +$ kubectl -n kusionstack-system get po +NAME READY STATUS RESTARTS AGE +kusionstack-operating-0 2/2 Running 0 42s +kusionstack-operating-1 2/2 Running 0 32s +kusionstack-operating-2 2/2 Running 0 21s +kusionstack-operating-3 2/2 Running 0 12s +kusionstack-operating-4 0/2 ContainerCreating 0 1s + +# Now we have three shards with three lease. 
+# operating-0-canary -> [kusionstack-operating-0] +# operating-1-normal -> [kusionstack-operating-1, kusionstack-operating-2] +# operating-2-normal -> [kusionstack-operating-3, kusionstack-operating-4] +$ kubectl -n kusionstack-system get lease +NAME HOLDER AGE +kusionstack-controller-manager---operating-0-canary kusionstack-operating-0_81b5bbae-be63-45ed-a939-e67e0c3d6326 12m +kusionstack-controller-manager---operating-1-normal kusionstack-operating-1_e4bbad49-e6ec-42fa-8ffd-caae82156a3e 12m +kusionstack-controller-manager---operating-2-normal kusionstack-operating-3_94f7f81a-f9e6-47d6-b72b-e16da479e9be 12m +``` + + Show the sample ShardingConfig: + +```bash +$ helm template sample-operating kusionstack/operating \ + --version v0.1.1 \ + --set sharding.enabled=true \ + --set sharding.isDemo=true \ + --show-only templates/shardingconfig.yaml +``` + +Here is a sample `ShardingConfig`: +```yaml title="operating/templates/shardingconfig.yaml" +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: ShardingConfig +metadata: + name: sharding-root + namespace: kusionstack-system +spec: + # Auto sharding config + root: + prefix: operating + targetStatefulSet: kusionstack-operating + canary: + replicas: 1 + inNamespaces: + - kusionstack-system + auto: + everyShardReplicas: 2 + shardingSize: 2 + resourceSelector: + - relateResources: + - apiGroups: + - '*' + resources: + - configmaps + - pods + - endpoints + - services + - replicasets + - apiGroups: + - apps.kusionstack.io + resources: + - '*' + controller: + leaderElectionName: kusionstack-controller-manager +``` +You can configure the ShardingConfig according to your requirements. + +:::info +In order to enable the ShardingConfig, you also need to add the following label to the pod template. +`ctrlmesh.kusionstack.io/watching: 'true'` +`ctrlmesh.kusionstack.io/enable-proxy: 'true'` +We plan to deprecate it in future versions. +::: \ No newline at end of file diff --git a/ctrlmesh_versioned_sidebars/version-v0.1-sidebars.json b/ctrlmesh_versioned_sidebars/version-v0.1-sidebars.json new file mode 100644 index 00000000..ea4bfaa5 --- /dev/null +++ b/ctrlmesh_versioned_sidebars/version-v0.1-sidebars.json @@ -0,0 +1,8 @@ +{ + "ctrlmesh": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/ctrlmesh_versions.json b/ctrlmesh_versions.json new file mode 100644 index 00000000..3fccb453 --- /dev/null +++ b/ctrlmesh_versions.json @@ -0,0 +1,3 @@ +[ + "v0.1" +] diff --git a/docs/governance/intro/_category_.json b/docs/community/intro/_category_.json similarity index 100% rename from docs/governance/intro/_category_.json rename to docs/community/intro/_category_.json diff --git a/docs/community/intro/intro.md b/docs/community/intro/intro.md new file mode 100644 index 00000000..4feb48c8 --- /dev/null +++ b/docs/community/intro/intro.md @@ -0,0 +1,19 @@ +--- +sidebar_position: 1 +--- + +# Community + +Welcome to the KusionStack open source community! Your participation is vital for the healthy growth of our open source projects. There are several ways you can get involved. You can create issues or fix bugs, improve documentation, or contribute code by submitting pull requests (PRs). The KusionStack project is always looking for new contributors and feedback. To get in touch, please refer to the links below. + +* [Open a New Discussion on GitHub](https://github.com/orgs/KusionStack/discussions) +* [Contact Us on Slack](https://app.slack.com/client/T03H6QE4VL0/setup-welcome) + +# CONTRIBUTING + +We appreciate contributions from the community! 
To submit changes, please refer to the contributing file in the corresponding KusionStack repository. The files are available at the following links: +* [Kusion](https://github.com/KusionStack/kusion/blob/main/docs/contributing.md) +* [Operating](https://github.com/KusionStack/operating/blob/main/docs/contributing.md) + +# CODE OF CONDUCT +To make KusionStack a welcoming and harassment-free experience for everyone, we follow the [KusionStack Code of Conduct](https://github.com/KusionStack/kusion/blob/main/docs/CODE_OF_CONDUCT.md). diff --git a/docs/ctrlmesh/_config/_category_.json b/docs/ctrlmesh/_config/_category_.json new file mode 100644 index 00000000..1369e116 --- /dev/null +++ b/docs/ctrlmesh/_config/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Configuration", + "position": 5 +} diff --git a/docs/ctrlmesh/_config/circuitbreaker.md b/docs/ctrlmesh/_config/circuitbreaker.md new file mode 100644 index 00000000..830d1671 --- /dev/null +++ b/docs/ctrlmesh/_config/circuitbreaker.md @@ -0,0 +1,51 @@ +--- +sidebar_position: 3 +--- +# CircuitBreaker + +```yaml +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: CircuitBreaker +metadata: + name: demo + namespace: default +spec: + rateLimitings: + - bucket: + burst: 500 + interval: 1s + limit: 20 + name: deletePod + properties: + sleepingWindowSize: 15m + recoverPolicy: SleepingWindow + resourceRules: + - apiGroups: + - "" + namespaces: + - '*' + resources: + - pods + verbs: + - delete + triggerPolicy: Normal + - bucket: + burst: 200 + interval: 1s + limit: 20 + name: trafficOffLimit + restRules: + - method: POST + url: https://*.com/*/trafficOff + triggerPolicy: LimiterOnly + trafficInterceptRules: + - contents: + - .*(127.0.0.1).* + interceptType: White + methods: + - POST + - GET + - PUT + name: internalOnly + +``` \ No newline at end of file diff --git a/docs/ctrlmesh/_config/shardingconfig.md b/docs/ctrlmesh/_config/shardingconfig.md new file mode 100644 index 00000000..6130c389 --- /dev/null +++ b/docs/ctrlmesh/_config/shardingconfig.md @@ -0,0 +1,35 @@ +--- +sidebar_position: 1 +--- +# ShardingConfig + +## Auto Sharding +```yaml +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: ShardingConfig +metadata: + name: sharding-root + namespace: default +spec: + root: + prefix: demo + targetStatefulSet: operator-demo + canary: + replicas: 1 + inNamespaces: + - ns-canary + auto: + everyShardReplicas: 2 + shardingSize: 2 + resourceSelector: + - relatedResources: + - apiGroups: + - '*' + resources: + - pods + controller: + leaderElectionName: operator-leader + webhook: + certDir: /etc/kubernetes/webhook-cret/ + port: 9443 +``` \ No newline at end of file diff --git a/docs/ctrlmesh/concepts/_category_.json b/docs/ctrlmesh/concepts/_category_.json new file mode 100644 index 00000000..50a638da --- /dev/null +++ b/docs/ctrlmesh/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 2 +} diff --git a/docs/ctrlmesh/concepts/concepts.md b/docs/ctrlmesh/concepts/concepts.md new file mode 100644 index 00000000..b474cfe6 --- /dev/null +++ b/docs/ctrlmesh/concepts/concepts.md @@ -0,0 +1,78 @@ +# Concepts + +Generally, a `ctrlmesh-proxy` container will be injected into each operator Pod that has configured in ShardingConfigs. +This proxy container will intercept and handle the connection by between API/Oth Server and controllers/webhooks in the Pod. + +
+<!-- proxy injection diagram image (added under images/ in this change) -->
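+As a concrete, illustrative sketch of what opting a workload into the mesh looks like, the Getting Started guide in this same change labels the operator's pod template; in the example below only the two `ctrlmesh.kusionstack.io/*` labels are taken from this documentation, while the Deployment name, namespace, image and `app` label are hypothetical:
+
+```yaml
+# Hypothetical operator Deployment; only the two ctrlmesh labels come from these docs.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: sample-operator
+  namespace: sample-system
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: sample-operator
+  template:
+    metadata:
+      labels:
+        app: sample-operator
+        # Both labels below are required (per the Getting Started guide) to enable
+        # ShardingConfig handling and ctrlmesh-proxy sidecar injection.
+        ctrlmesh.kusionstack.io/watching: "true"
+        ctrlmesh.kusionstack.io/enable-proxy: "true"
+    spec:
+      containers:
+        - name: manager
+          image: example.com/sample-operator:v0 # placeholder image
+```
+
+Once these labels are present, the `ctrlmesh-proxy` sidecar is injected into the operator pods, which is why the sample pods in the Getting Started guide report `2/2` containers after patching.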
+ + +ApiServer proxy method: +- *iptables nat*: +- *fake kubeconfig*: + +The `ctrlmesh-manager` dispatches rules to the proxies, so that they can route requests according to the rules. + + +A core CRD of ControllerMesh is `ShardingConfig`. It contains all rules for user's controller: + +```yaml +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: ShardingConfig +metadata: + name: sharding-demo + namespace: operator-demo +spec: + controller: + leaderElectionName: operator-leader + webhook: + certDir: /tmp/webhook-certs + port: 9443 + limits: + - relateResources: + - apiGroups: + - '*' + resources: + - pods + - services + selector: + matchExpressions: + - key: ctrlmesh.kusionstack.io/namespace + operator: In + values: + - ns-a + - ns-b + matchLabels: + app: foo + selector: + matchExpressions: + - key: statefulset.kubernetes.io/pod-name + operator: In + values: + - operator-demo-0 +``` + +- selector: for all pods under a shard. It can be a subset of pods under a StatefulSet. +- controller: configuration for controller, including leader election name +- webhook: configuration for webhook, including certDir and port of this webhook +- limits: shard isolation is achieved through a set of `ObjectSelector`. + +When `manager` is first launched, shard labels will be added to all configured resources. + +- `ctrlmesh.kusionstack.io/sharding-hash`: the hash value calculated based on the namespace ranges from 0 to 31. +- `ctrlmesh.kusionstack.io/namespace`: the namespace referring to this resource. +- `ctrlmesh.kusionstack.io/control`: under ctrlmesh-manager control. + + +In this repo, we only support `ObjectSelector` type of flow control, +which means the `ctrlmesh-proxy `will proxy http/s requests to the ApiServer, +and inject a `LabelSelector` into the request param for the requested resource type. + + + + +Router: +
+<!-- router diagram image (added under images/ in this change) -->
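+To make the label-based flow control above more tangible, here is an illustrative sketch of the shard labels the manager attaches to a configured resource; the three label keys come from the list earlier on this page, while the resource name, namespace and label values are hypothetical:
+
+```yaml
+# Example ConfigMap after the manager has labelled it (values are examples only).
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: demo-config
+  namespace: foo-01
+  labels:
+    ctrlmesh.kusionstack.io/control: "true"    # managed by ctrlmesh-manager
+    ctrlmesh.kusionstack.io/namespace: foo-01  # the namespace this resource refers to
+    ctrlmesh.kusionstack.io/sharding-hash: "7" # hash of the namespace, in the 0-31 range
+```
+
+Requests issued by a proxied operator are then narrowed to the resources whose labels match its shard, which is the `LabelSelector` injection described above.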
\ No newline at end of file diff --git a/docs/ctrlmesh/faq/_category_.json b/docs/ctrlmesh/faq/_category_.json new file mode 100644 index 00000000..60548d80 --- /dev/null +++ b/docs/ctrlmesh/faq/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "FAQ", + "position": 6 +} diff --git a/docs/ctrlmesh/faq/faq.md b/docs/ctrlmesh/faq/faq.md new file mode 100644 index 00000000..6c650fc7 --- /dev/null +++ b/docs/ctrlmesh/faq/faq.md @@ -0,0 +1,5 @@ +--- +sidebar_position: 1 +--- + +# FAQ \ No newline at end of file diff --git a/docs/ctrlmesh/images/fake-configmap.png b/docs/ctrlmesh/images/fake-configmap.png new file mode 100644 index 00000000..e47c1f84 Binary files /dev/null and b/docs/ctrlmesh/images/fake-configmap.png differ diff --git a/docs/ctrlmesh/images/mesh-arch-2.png b/docs/ctrlmesh/images/mesh-arch-2.png new file mode 100644 index 00000000..ce05d7ab Binary files /dev/null and b/docs/ctrlmesh/images/mesh-arch-2.png differ diff --git a/docs/ctrlmesh/images/mesh-arch.png b/docs/ctrlmesh/images/mesh-arch.png new file mode 100644 index 00000000..3f94003a Binary files /dev/null and b/docs/ctrlmesh/images/mesh-arch.png differ diff --git a/docs/ctrlmesh/images/mesh-proxy.png b/docs/ctrlmesh/images/mesh-proxy.png new file mode 100644 index 00000000..c4e7a6a6 Binary files /dev/null and b/docs/ctrlmesh/images/mesh-proxy.png differ diff --git a/docs/ctrlmesh/images/sharding.png b/docs/ctrlmesh/images/sharding.png new file mode 100644 index 00000000..decbec69 Binary files /dev/null and b/docs/ctrlmesh/images/sharding.png differ diff --git a/docs/ctrlmesh/intro/_category_.json b/docs/ctrlmesh/intro/_category_.json new file mode 100644 index 00000000..fa1c06ac --- /dev/null +++ b/docs/ctrlmesh/intro/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Introduction", + "position": 1 +} diff --git a/docs/ctrlmesh/intro/intro.md b/docs/ctrlmesh/intro/intro.md new file mode 100644 index 00000000..ab974f1b --- /dev/null +++ b/docs/ctrlmesh/intro/intro.md @@ -0,0 +1,21 @@ + +# Controller Mesh + +KusionStack Controller Mesh is a solution that helps developers managing their controllers/operators better. + +## Key Features + +1. **Sharding**: Through relevant configurations, Kubernetes single-point deployed operator applications can be flexibly shard deployed. +2. **Canary upgrade**: Depends on sharding, the controller instances can be updated in canary progress instead of updated in one time. +3. **Circuit breaker and rate limiter**: Not only Kubernetes operation requests, but also other external operation requests. +4. **Multicluster routing and sharding** +5. **And more**: Fault injection and Observability (Todo). + + +## Architecture +
+<!-- architecture diagram image (added under images/ in this change) -->
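+If you only want to get the manager running before reading further, the two commands below are a minimal sketch copied from the Installation page added in this same change (the chart repository URL and chart name come from that page, nothing here is invented):
+
+```shell
+# Add the KusionStack charts repository (skip if it is already configured).
+helm repo add kusionstack https://kusionstack.github.io/charts
+
+# Install the latest released Controller Mesh chart.
+helm install ctrlmesh kusionstack/ctrlmesh
+```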
+ +Visit [Installation](./../started/install.md) or [Quick Start](./../started/try.md). + diff --git a/docs/ctrlmesh/started/_category_.json b/docs/ctrlmesh/started/_category_.json new file mode 100644 index 00000000..c1998c5e --- /dev/null +++ b/docs/ctrlmesh/started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 3 +} diff --git a/docs/ctrlmesh/started/install.md b/docs/ctrlmesh/started/install.md new file mode 100644 index 00000000..e2b01f1c --- /dev/null +++ b/docs/ctrlmesh/started/install.md @@ -0,0 +1,68 @@ +--- +sidebar_position: 1 +--- +# Installation + +## Install with helm +Controller Mesh requires **Kubernetes version >= 1.18** +```shell +# Firstly add charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install ctrlmesh kusionstack/ctrlmesh + +# Upgrade to the latest version +$ helm upgrade ctrlmesh kusionstack/ctrlmesh + +# Uninstall +$ helm uninstall ctrlmesh +``` +[Helm](https://github.com/helm/helm) is a tool for managing packages of pre-configured Kubernetes resources. +### Optional: chart parameters + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|-------------------------------------|-------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `namespace` | namespace for controller mesh installation | `ctrlmesh` | +| `namespaceEnabled` | Whether to create the installation.namespace | `true` | +| `manager.replicas` | Replicas of ctrlmesh-manager deployment | `2` | +| `manager.image.repo` | Repository for ctrlmesh-manager image | `kusionstack/ctrlmesh-manager` | +| `manager.image.pullPolicy` | Image pull policy for ctrlmesh-manager | `IfNotPresent` | +| `manager.image.tag` | Tag for ctrlmesh-manager | `v0.1.0` | +| `manager.resources.limits.cpu` | CPU resource limit of ctrlmesh-manager container | `500m` | +| `manager.resources.limits.memory` | Memory resource limit of ctrlmesh-manager container | `512Mi` | +| `manager.resources.requests.cpu` | CPU resource request of ctrlmesh-manager container | `10m` | +| `manager.resources.requests.memory` | Memory resource request of ctrlmesh-manager container | `64Mi` | +| `proxy.image.repo` | Repository for ctrlmesh-proxy image | `kusionstack/ctrlmesh-proxy` | +| `proxy.image.pullPolicy` | Image pull policy for ctrlmesh-proxy | `IfNotPresent` | +| `proxy.image.tag` | Tag for ctrlmesh-proxy | `v0.1.0` | +| `proxy.resources.limits.cpu` | CPU resource requests of ctrlmesh-proxy container | `100m` | +| `proxy.resources.limits.memory` | Memory resource requests of ctrlmesh-proxy container | `100Mi` | +| `init.image.repo` | Repository for ctrlmesh-init image | `kusionstack/ctrlmesh-init` | +| `init.image.tag` | Tag for ctrlmesh-init | `v0.1.0` | +| `shardingGroupVersionKinds` | Sharding resource lists(yaml) | | + +config `groupVersionKinds` in file: +```yaml +ctrlmesh.kusionstack.io/v1alpha1: +- '*' +v1: +- Pod +- PersistentVolumeClaim +- Service +- ConfigMap +- Endpoint +apps/v1: +- StatefulSet +- ReplicaSet +- ControllerRevision +``` + +Specify each parameter using the `--set key=value` argument to `helm 
install` or `helm upgrade`. + + diff --git a/docs/ctrlmesh/started/try.md b/docs/ctrlmesh/started/try.md new file mode 100644 index 00000000..74dc19ee --- /dev/null +++ b/docs/ctrlmesh/started/try.md @@ -0,0 +1,281 @@ +--- +sidebar_position: 4 +--- +# Try a Sample +This guide lets you quickly evaluate KusionStack Controller Mesh. + + +## Install Controller Mesh Manager +Controller Mesh requires **Kubernetes version >= 1.18** + +**Install with helm** +```bash +# Firstly add KusionStack charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install ctrlmesh kusionstack/ctrlmesh + +# Wait manager ready +$ kubectl -n ctrlmesh get po +NAME READY STATUS RESTARTS AGE +ctrlmesh-57d6b4df57-mdslc 1/1 Running 0 40s +ctrlmesh-57d6b4df57-mtv2s 1/1 Running 0 40s +``` +[Install manager with more options](install.md) + + +## Try with sample-operator + +Here is an example of a `Deployment` enabling sharding. +### Get and deploy sample-operator v0 +👉 [sample-operator repo](https://github.com/KusionStack/controller-mesh/tree/sample-operator) +```bash +# Clone and checkout branch sample-operator. +$ git clone -b sample-operator https://github.com/KusionStack/controller-mesh.git +$ cd sample + +# Make sure you have kind or test cluster, and kubectl is available. + +# Deploy default sample-operator v0. +$ IMAGE_TAG=v0.1.0 make deploy + +namespace/kusionstack-sample created +serviceaccount/kusionstack-controller-manager created +role.rbac.authorization.k8s.io/kusionstack-leader-election-role created +clusterrole.rbac.authorization.k8s.io/kusionstack-manager-role created +rolebinding.rbac.authorization.k8s.io/kusionstack-leader-election-rolebinding created +clusterrolebinding.rbac.authorization.k8s.io/kusionstack-sample-manager-rolebinding created +deployment.apps/kusionstack-sample-operator-v0 created + +# kusionstack-sample-operator-v0 is created. +$ kubectl get deploy -n kusionstack-sample +NAME READY UP-TO-DATE AVAILABLE AGE +kusionstack-sample-operator-v0 2/2 2 2 14s + +$ kubectl get po -n kusionstack-sample +NAME READY STATUS RESTARTS AGE +kusionstack-sample-operator-v0-66f7595c7b-n4c47 1/1 Running 0 50s +kusionstack-sample-operator-v0-66f7595c7b-wxwtv 1/1 Running 0 50s + +# sample-operator uses leader-election. Only one leader pod reconciling. +$ kubectl -n kusionstack-sample get lease +NAME HOLDER AGE +sample-operator-leader kusionstack-sample-operator-v0-66f7595c7b-wxwtv_c0ed684d-f332-47f6-890c-dd7e489486f2 53 +``` +### Play with ShardingConfig + +By configuring `ShardingConfig` appropriately, you can achieve canary and sharding deploy. + +**Isolate canary namespaces** +```bash +# Create some test namespaces([foo-01, foo-02, ..., foo-31]). +$ chmod +x ./scripts/create-ns-foo.sh && ./scripts/create-ns-foo.sh + +# All namespaces are controlled by sample-operator v0. +$ kubectl get ns -l sample.kusionstack.io/control-by=kusionstack-sample-operator-v0-66f7595c7b-wxwtv +NAME STATUS AGE +default Active 12d +foo-01 Active 78s +foo-02 Active 78s +foo-03 Active 78s +... ... ... +foo-32 Active 78s + +# There are more details in leader pod log. 
+$ kubectl logs kusionstack-sample-operator-v0-66f7595c7b-wxwtv -n kusionstack-sample | grep "hold namespaces" +I0110 09:32:50.950535 1 runner.go:101] hold namespaces [ctrlmesh default foo-01 foo-02 foo-03 foo-04 foo-05 foo-06 foo-07 foo-08 foo-09 foo-10 foo-11 foo-12 foo-13 foo-14 foo-15 foo-16 foo-17 foo-18 foo-19 foo-20 foo-21 foo-22 foo-23 foo-24 foo-25 foo-26 foo-27 foo-28 foo-29 foo-30 foo-31 foo-32 kusionstack-sample kusionstack-system local-path-storage] + +# Apply sample ShardingConfigs +$ ./bin/kustomize build config/shardingconfig/canary | kubectl apply -f - +shardingconfig.ctrlmesh.kusionstack.io/kusionstack-sample-operator-0-canary created +shardingconfig.ctrlmesh.kusionstack.io/kusionstack-sample-operator-1-normal created +``` + +export const Highlight = ({children, color}) => ( + + {children} + +); + +The [kusionstack-sample-operator-0-canary](https://github.com/KusionStack/controller-mesh/blob/sample-operator/sample/config/shardingconfig/canary/shardingconfig-canary.yaml) has restricted the scope of namespaces [foo-01, foo-02, foo-03] reconciled by version `v1`. +And [kusionstack-sample-operator-1-normal](https://github.com/KusionStack/controller-mesh/blob/sample-operator/sample/config/shardingconfig/canary/shardingconfig-normal.yaml) decided that other namespaces will be reconciled by version `v0`. +```bash +# Patch labels to pod template to inject sidecar and ShardingConfig +$ kubectl -n kusionstack-sample patch deployment kusionstack-sample-operator-v0 --type=strategic --patch \ + 'spec: + template: + metadata: + labels: + ctrlmesh.kusionstack.io/enable-proxy: "true" + ctrlmesh.kusionstack.io/watching: "true"' + +# Mesh proxy container was injected +$ kubectl get po -n kusionstack-sample +NAME READY STATUS RESTARTS AGE +kusionstack-sample-operator-v0-6944bb4bf5-gclqq 2/2 Running 0 30s +kusionstack-sample-operator-v0-6944bb4bf5-lfwdb 2/2 Running 0 41s + +# Find current leader +# sharding lease format: ${leader-election-name}---${shardingconfig-name} +$ kubectl get lease -n kusionstack-sample +NAME HOLDER AGE +sample-operator-leader---kusionstack-sample-operator-1-normal kusionstack-sample-operator-v0-6944bb4bf5-lfwdb_497a7962-a5f1-465e-b8ef-6e35660c63f4 32s + +# Namespaces [foo-1, foo-2, foo-3] are no longer under v0 control. +$ kubectl logs kusionstack-sample-operator-v0-6944bb4bf5-lfwdb -c manager -n kusionstack-sample | grep "namespaces" + ... hold namespaces [default foo-04 foo-05 ... 
foo-32] + +``` + +**Deploy canary sample-operator v1** + +```bash +# Apply sample operator v1 which deployment already labeled +$ ./bin/kustomize build config/manager-v1 | kubectl apply -f - +deployment.apps/kusionstack-sample-operator-v1 created + +# Two pods created +$ kubectl get po -n kusionstack-sample +NAME READY STATUS RESTARTS AGE +kusionstack-sample-operator-v0-6944bb4bf5-gclqq 2/2 Running 0 4m +kusionstack-sample-operator-v0-6944bb4bf5-lfwdb 2/2 Running 0 4m +kusionstack-sample-operator-v1-7b6bbb49c8-kbgww 0/2 ContainerCreating 0 3s +kusionstack-sample-operator-v1-7b6bbb49c8-qbzjj 0/2 ContainerCreating 0 3s + +# The canary shard uses a separate lease +$ kubectl get lease -n kusionstack-sample +NAME HOLDER AGE +sample-operator-leader---kusionstack-sample-operator-0-canary kusionstack-sample-operator-v1-7b6bbb49c8-qbzjj_64272983-c59a-4574-933d-7d5fea7a1e35 15s +sample-operator-leader---kusionstack-sample-operator-1-normal kusionstack-sample-operator-v0-6944bb4bf5-lfwdb_497a7962-a5f1-465e-b8ef-6e35660c63f4 4m + +# Only foo-01, foo-02, foo-03 controlled by v1 +$ kubectl get ns -l sample.kusionstack.io/control-by=v1 -n kusionstack-sample +NAME STATUS AGE +foo-01 Active 4m +foo-02 Active 4m +foo-03 Active 4m + +$ kubectl logs kusionstack-sample-operator-v1-7b6bbb49c8-qbzjj -c manager -c kusionstack-sample| grep namespaces + ... hold namespaces [foo-01 foo-02 foo-03] +``` +Similarly, if you want to have more shards, you need to do the following steps: +1. Extract a portion of the namespace from the existing ShardingConfigs. +2. Configure a new ShardingConfig and apply it. +3. Recreate or restart the existing pods to make the new ShardingConfig take effect. +4. Scale out the Pods for the new ShardingConfig. + + +### Clear sample resources + +```bash +$ chmod +x ./scripts/clear.sh && ./scripts/clear.sh +``` + + +:::tip +**Beta**: *We try to support automatic sharding strategy. With automatic sharding configuration, there is no need to manually configure each shard's configuration. It manages multiple sub-shardingconfigs automatically through a root configuration.* +::: + + + +## Try with Operating + +For `StatefulSet` case, you can use the **[Operating v0.1.1](https://kusionstack.io/docs/operating/introduction/)** available here. + +Deploy the sample operator with ShardingConfig: + +```bash +$ helm repo update +$ helm install sample-operating kusionstack/operating \ + --version v0.2.0 \ + --set sharding.enabled=true \ + --set sharding.isDemo=true + +$ kubectl -n kusionstack-system get sts +NAME READY AGE +kusionstack-operating 5/5 1m45s + +# The proxy container will be automatically injected into the pod +$ kubectl -n kusionstack-system get po +NAME READY STATUS RESTARTS AGE +kusionstack-operating-0 2/2 Running 0 42s +kusionstack-operating-1 2/2 Running 0 32s +kusionstack-operating-2 2/2 Running 0 21s +kusionstack-operating-3 2/2 Running 0 12s +kusionstack-operating-4 0/2 ContainerCreating 0 1s + +# Now we have three shards with three lease. 
+# operating-0-canary -> [kusionstack-operating-0] +# operating-1-normal -> [kusionstack-operating-1, kusionstack-operating-2] +# operating-2-normal -> [kusionstack-operating-3, kusionstack-operating-4] +$ kubectl -n kusionstack-system get lease +NAME HOLDER AGE +kusionstack-controller-manager---operating-0-canary kusionstack-operating-0_81b5bbae-be63-45ed-a939-e67e0c3d6326 12m +kusionstack-controller-manager---operating-1-normal kusionstack-operating-1_e4bbad49-e6ec-42fa-8ffd-caae82156a3e 12m +kusionstack-controller-manager---operating-2-normal kusionstack-operating-3_94f7f81a-f9e6-47d6-b72b-e16da479e9be 12m +``` + + Show the sample ShardingConfig: + +```bash +$ helm template sample-operating kusionstack/operating \ + --version v0.1.1 \ + --set sharding.enabled=true \ + --set sharding.isDemo=true \ + --show-only templates/shardingconfig.yaml +``` + +Here is a sample `ShardingConfig`: +```yaml title="operating/templates/shardingconfig.yaml" +apiVersion: ctrlmesh.kusionstack.io/v1alpha1 +kind: ShardingConfig +metadata: + name: sharding-root + namespace: kusionstack-system +spec: + # Auto sharding config + root: + prefix: operating + targetStatefulSet: kusionstack-operating + canary: + replicas: 1 + inNamespaces: + - kusionstack-system + auto: + everyShardReplicas: 2 + shardingSize: 2 + resourceSelector: + - relateResources: + - apiGroups: + - '*' + resources: + - configmaps + - pods + - endpoints + - services + - replicasets + - apiGroups: + - apps.kusionstack.io + resources: + - '*' + controller: + leaderElectionName: kusionstack-controller-manager +``` +You can configure the ShardingConfig according to your requirements. + +:::info +In order to enable the ShardingConfig, you also need to add the following label to the pod template. +`ctrlmesh.kusionstack.io/watching: 'true'` +`ctrlmesh.kusionstack.io/enable-proxy: 'true'` +We plan to deprecate it in future versions. +::: \ No newline at end of file diff --git a/docs/develop/build-docs/_category_.json b/docs/develop/build-docs/_category_.json deleted file mode 100644 index 8da86e76..00000000 --- a/docs/develop/build-docs/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Build Doc", - "position": 2 -} diff --git a/docs/develop/build-docs/build-docs.md b/docs/develop/build-docs/build-docs.md deleted file mode 100644 index ef04bde7..00000000 --- a/docs/develop/build-docs/build-docs.md +++ /dev/null @@ -1,90 +0,0 @@ -# Build Doc - -Kusion 的文档采用 [Docusaurus](https://docusaurus.io/) 框架构建,Docusaurus 是基于 React 构建的站点生成器。在构建之前先安装 [Node.js 16+](https://nodejs.org)。 - -文档仓库:https://github.com/KusionStack/kusionstack.io - -## 1. 克隆仓库 - -然后克隆文档仓库到本地: - -``` -$ git clone git@github.com:KusionStack/kusionstack.io.git -``` - -Markdown 格式的文档主要在 docs 和 blog 两个目录,目录对应的内容说明如下: - -- `/docs` - 文档根目录 -- `/docs/user_docs` - 使用文档,针对 Kusion 使用者 -- `/docs/develop` - 开发文档,针对 Kusion 项目本身开发和完善 -- `/docs/referece` - 参考手册,工具、语言、模型的参考 -- `/docs/governance` - 治理,开源社区、路线规划等 -- `/blog` - 博客文章 - -## 2. 本地预览 - -预览和构建之前需要先执行 `npm install` 命令安装 Node.js 依赖的包,然后执行 `npm run start` 命令启动本地预览: - -``` -$ npm install -$ npm run start - -> website@0.1.0 start -> docusaurus start - -[INFO] Starting the development server... -[SUCCESS] Docusaurus website is running at http://localhost:3000/. 
- -✔ Client - Compiled successfully in 3.84s - -client (webpack 5.69.1) compiled successfully - -█ -``` - -该命令会通过默认浏览器打开 http://localhost:3000 页面。左上角的导航栏有:使用文档、开发文档、内部文档、参考手册、治理和博客按钮,分别对应前文对应的目录。右上角对应多语言、文档仓库和主题切换按钮。主体页面是 Kusion 一句话简介和快速开始的按钮链接,下面是 KCL 配置语言、Kusion 模型库和 Kusion 引擎的介绍。 - - -## 3. 构建发布 - -同样需要先执行 `npm install` 命令安装 Node.js 依赖的包(至少执行一次),然后通过 `npm run build` 构建最终的页面资源: - -``` -$ npm run build - -> website@0.1.0 build -> docusaurus build - -[INFO] [zh-CN] Creating an optimized production build... - -█ -``` - -构建是会有更严格的检查,比如内部的坏链接会输出红色的错误信息、橘黄色输出警告信息。对于测试测试,如果遇到比较多的坏链接,可以先将 `docusaurus.config.js` 文件中的 `onBrokenLinks` 和 `onBrokenMarkdownLinks` 设置为 `"ignore"` 关闭。产生的文件输出到 `build` 目录,该目录可以直接部署发布。 - - -## 4. 配置文件 - -配置文件有文档配置、侧边栏和内部文档几个: - -- [docusaurus.config.js](https://github.com/KusionStack/kusionstack.io/blob/main/docusaurus.config.js) 是 [Docusaurus](https://docusaurus.io/) 的主配置文件。 -- [sidebars.js](https://github.com/KusionStack/kusionstack.io/blob/main/sidebars.js) 对应文档的侧边栏配置,被 [docusaurus.config.js](https://github.com/KusionStack/kusionstack.io/blob/main/docusaurus.config.js) 文件引用。 - -## 5. 主页面内容 - -主页面内容由以下文件构建产生: - -- [docusaurus.config.js](https://github.com/KusionStack/kusionstack.io/blob/main/docusaurus.config.js) 是 [Docusaurus](https://docusaurus.io/) 的主配置文件,包含顶部的导航栏和底部的链接。 -- [src/pages/index.js](https://github.com/KusionStack/kusionstack.io/blob/main/src/pages/index.js) 对应页面主体区域,包含快速开始的链接按钮。 -- [src/components/HomepageFeatures.js](https://github.com/KusionStack/kusionstack.io/blob/main/src/components/HomepageFeatures.js) 对应 Kusion 的特性介绍。 - -## 6. 内部链接 - -网址内部的相对链接可以通过 Markdown 文件的相对路径映射,比如 [`/docs/develop/build-docs`](https://github.com/KusionStack/kusionstack.io/docs/develop/build-docs) 文件中可以通过 [`/docs/user_docs/intro/kusion-intro`](/docs/user_docs/intro/kusion-intro) 绝对路径或 [`../repos`](../repos) 相对路径引用。 - -注意:目录内部的 `index.md` 或与目录同名的 Markdonwn 文件对应目录链接的页面。 - -## 7. 更新模型文档 - -Konfig 中的 [模型文档](/docs/reference/model) 是从 KCL 代码,通过 [docgen](/docs/reference/cli/kcl/docgen) 工具自动提取产生(比如自动生成的 [ConfigMap](/docs/reference/model/kusion_models/kube/frontend/configmap/doc_configmap) 模型文档)。如果希望完善模型文档,首先需要到 [Konfig 代码仓库](https://github.com/KusionStack/konfig) 添加或更新文档,然后重新生成文档(目前还不能自动同步,有兴趣的用户可以尝试 [提供帮助](/docs/governance/contribute/contribute-docs))。 diff --git a/docs/develop/build-from-source/_category_.json b/docs/develop/build-from-source/_category_.json deleted file mode 100644 index 1bb8ee61..00000000 --- a/docs/develop/build-from-source/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Build Kusion", - "position": 2 -} diff --git a/docs/develop/build-from-source/build-from-source.md b/docs/develop/build-from-source/build-from-source.md deleted file mode 100644 index e190342d..00000000 --- a/docs/develop/build-from-source/build-from-source.md +++ /dev/null @@ -1,4 +0,0 @@ -# Build KusionStack - -When users want to modify the underlying code and see the effect themselves, they want to build the KusionStack tool from the code. Building from code is not only a necessary condition for developing KusionStack projects, but also a necessary process for normal binary packaging and distribution. 
- diff --git a/docs/develop/build-from-source/docker.md b/docs/develop/build-from-source/docker.md deleted file mode 100644 index 137d169d..00000000 --- a/docs/develop/build-from-source/docker.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Docker & Ubuntu - -KusionStack 主要工具以 Rust、Golang 等语言为主开发,KCL 语言通过 Python 提供一些扩展插件,同时还需要依赖 Git、Makefile 等工具。为了方便配置开发环境,我们提供了基于 ubuntu:20.04 的 Dockerfile 配置文件:https://github.com/KusionStack/KCLVM/blob/main/Dockerfile 。用户可以基于该自行构建镜像,也可以通过 `docker pull kusionstack/kclvm-builder-ubuntu` 命令拉取镜像。 - -如果是本地的 Ubuntu 环境,可以参考 Dockerfile 文件的命令安装依赖环境。 - -注意: -1. 本地除了 Docker 之外,还需要有 Bash 和 GMake 等工具 -2. macOS m1 系统对 Docker 的支持还有待完善,构建时可能遇到阻塞等问题 - -## 1. 构建 KCLVM - -KCLVM 是 Kusion 中 KCL 配置语言的实现,通过以下命令克隆 KCLVM 代码到一个新的目录: - -``` -$ git clone git@github.com:KusionStack/KCLVM.git -``` - -然后在命令行环境切换到 KCLVM 代码根目录,执行以下命令: - -``` -$ make sh-in-docker -root@ubuntu:~/kclvm# pwd -/root/kclvm -``` - -以上命令会将宿主机器当前的 KCLVM 目录映射到容器中的 `/root/kclvm` 目录,同时进入 Bash 环境。 - -然后通过 `run.sh` 脚本构建 CPython: - -``` -root@ubuntu:~/kclvm# ./run.sh -1) build 3) build-kclvm 5) test 7) lint-check 9) release-arm64 -2) build-cpython 4) update-kclvm 6) format 8) release -Please select the action: 2 -... -``` - -选择 2 进行 CPython 构建,构建时间几十分钟不等。构建的结果在 `/root/kclvm/_build/dist/ubuntu/cpython` 目录,CPython 只需要构建一次。 - -然后是构建 KCLVM 的 Python 和 Rust 版本,同时安装依赖的包(包括依赖的插件等): - -``` -root@ubuntu:~/kclvm# ./run.sh -1) build 3) build-kclvm 5) test 7) lint-check 9) release-arm64 -2) build-cpython 4) update-kclvm 6) format 8) release -Please select the action: 3 -... -``` - -构建结果在 `/root/kclvm/_build/dist/ubuntu/kclvm` 目录,其中插件在 `plugins` 子目录,二进制程序在 `bin` 子目录。将 `/root/kclvm/_build/dist/ubuntu/kclvm/bin` 目录添加到 `PATH` 环境变量,然后输入 `which kcl` 或 `kcl -h` 测试 KCL 命令行。 - -然后编译执行 `/root/kclvm/hello.k` 配置程序: - -``` -root@ubuntu:~/kclvm# kcl hello.k -name: kcl -age: 1 -two: 2 -x0: - name: kcl - age: 1 -x1: - name: kcl - age: 101 -``` - -一切正常就说明构建成功了。 - -## 2. 构建 kclvm-go 和 kcl-go - -kclvm-go 是基于 KCLVM 命令包装的 Go 语言 SDK,上层的 Kusion 命令也是通过 `kclvm-go` 使用 KCLVM 的功能。`kcl-go` 是基于 `kclvm-go` SDK,采用 Go 语言实现了一个命令行工具,其中包含 KCL 语言的 Playground 和单元测试等功能。 - -在 Docker 镜像中已经安装了 Go 版本,可以通过以下命令查看。 - -``` -root@ubuntu:~/kclvm# go version -go version go1.16.3 linux/amd64 -``` - -克隆 `kclvm-go` 仓库: - -``` -root@ubuntu:~/kclvm# cd -root@ubuntu:~# git clone git@github.com:KusionStack/kclvm-go.git -``` - -然后执行 `kclvm-go/examples/hello/main.go`: - -``` -root@ubuntu:~# cd kclvm-go -root@ubuntu:~/kclvm-go# go run ./examples/hello -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl -``` - -测试程序正常运行说明 `kclvm-go` 的构建已经成功了。现在可以执行更复杂的 `kcl-go` 命令: - -``` -root@ubuntu:~/kclvm-go# go run ./cmds/kcl-go -NAME: - kcl-go - K Configuration Language Virtual Machine - -USAGE: - kcl-go - kcl-go [global options] command [command options] [arguments...] - - kcl-go kcl -h - kcl-go -h -... -``` - -也可以通过 `kcl-go` 命令行执行 `kclvm-go/hello.k`: - -``` -root@ubuntu:~/kclvm-go# go run ./cmds/kcl-go run hello.k -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl -``` - -或者通过 `go run ./cmds/kcl-go play` 启动 Playground 服务,然后浏览器打开 http://127.0.0.1:2021 页面测试。 - -## 3. 构建 Kusion 命令 - -Kusion 是更上层的工具集合,其核心命令是采用 Go 语言实现,底层和 KCLVM 的交互是通过 `kclvm-go` 包完成。 - -克隆 Kusion 仓库: - -``` -root@ubuntu:~/kclvm# cd -root@ubuntu:~# git clone git@github.com:KusionStack/kusion.git -``` - -然后执行 `kusion/cmds/kusionctl` 程序: - -``` -root@ubuntu:~/kusion# go run ./cmd/kusionctl -kusion 作为云原生可编程技术栈,通过代码管理 kubernetes 集群。 -... 
-``` - -正常情况可以看到 kusion 命令的帮助信息。 - -## 4. KCLOpenapi - -KCLOpenapi 是 KCL 语言版本的 OpenAPI 工具,完整采用 Go 语言实现。因此可以在任何一个安装了 Go 1.16+ 的环境编译。 - -``` -$ git clone git@github.com:KusionStack/kcl-openapi.git -$ cd kcl-openapi -$ go run ./cmd/swagger -h -Usage: - swagger [OPTIONS] - -Swagger tries to support you as best as possible when building APIs. - -It aims to represent the contract of your API with a language agnostic -description of your application in json or yaml. -... -``` - -正常可以看到帮助信息。 - -## 5. 其它 - -KCLVM、KusionCtl 是本地开发需要经常构建的仓库,目前因为开发资源和时间的原因导致开发文档还不够完善,希望社区同学多多反馈共同参与完善。此外还有 VSCode 插件等外围工具的构建,用户可以参考仓库内部实现代码和文档操作。 diff --git a/docs/develop/build-from-source/windows.md b/docs/develop/build-from-source/windows.md deleted file mode 100644 index 1cd4ef24..00000000 --- a/docs/develop/build-from-source/windows.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Windows/X64 - -Windows 是最流行的桌面系统,有着庞大的用户群体,但是默认缺少完整的开发环境。如果需要在 Windows 下构建 KusionStack 工具,首先需要安装开发环境。 - -假设是 Windows/X64 环境,首先安装以下命令: - -1. 安装 VC2019,确保,默认的 C++ 工具都已经安装好 - - https://visualstudio.microsoft.com/zh-hans/downloads/ -1. 安装 Rust 和 cargo - - https://forge.rust-lang.org/infra/other-installation-methods.html - - 安装 x86_64-pc-windows-msvc 版本,配套 MSVC 编译器(待确认) -1. 安装 Go1.16+,必要时可以根据网络环境配置代理服务 - - https://go.dev/dl/ -1. 安装 TDM-GCC-x64 工具 - - https://jmeubank.github.io/tdm-gcc/download/ -1. 安装 LLVM-12.0.1-win64 - - https://github.com/PLC-lang/llvm-package-windows/releases/tag/v12.0.1 - - 设置 `LLVM_SYS_120_PREFIX` 和 `LLVM_SYS_70_PREFIX` 环境变量为安装的目录 -1. 打开 VS2019-x64 命令行 - -## 1. 构建 KCLVM - -KCLVM 是 Kusion 中 KCL 配置语言的实现,通过以下命令克隆 KCLVM 代码到一个新的目录,地址:`git@github.com:KusionStack/KCLVM.git`。 - -然后在 VS2019-x64 命令行环境切换到 `KCLVM` 目录执行 `cargo build` 测试 Rust 等环境。 - -然后在 VS2019-x64 命令行环境切换到 `.\Scripts\build-windows` 目录,执行 `build.bat` 批处理脚本进行构建。输出的文件在 `.\Scripts\build-windows\_output\kclvm-windows` 目录。 - -构建成功后通过以下命令测试 KCL 命令: - -``` -_output\kclvm-windows\kclvm.exe -m kclvm ..\..\hello.k -_output\kclvm-windows\kcl-go.exe run ..\..\hello.k -_output\kclvm-windows\kcl.exe ..\..\hello.k -``` - -一切正常就说明构建成功了。 - -## 2. 构建 kclvm-go 和 kcl-go - -kclvm-go 是基于 KCLVM 命令包装的 Go 语言 SDK,上层的 Kusion 命令也是通过 `kclvm-go` 使用 KCLVM 的功能。`kcl-go` 是基于 `kclvm-go` SDK,采用 Go 语言实现了一个命令行工具,其中包含 KCL 语言的 Playground 和单元测试等功能。 - -首先将 `kclvm.exe` 命令所在目录添加到 `PATH` 环境变量,然后重新登陆系统通过 `where kclvm` 命令检查是否可以找到 kclvm 命令。 - -然后克隆 kclvm-go 仓库,地址为:`git@github.com:KusionStack/kclvm-go.git`。然后进入 `kclvm-go` 命令执行以下命令: - -- `go run ./examples/hello` -- `go run ./cmds/kcl-go` - -测试程序正常运行说明 `kclvm-go` 的构建已经成功了。 - -也可以通过 `go run ./cmds/kcl-go run hello.k` 命令行执行 `kclvm-go/hello.k`,输出以下结果: - -```yaml -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl -``` - -或者通过 `go run ./cmds/kcl-go play` 启动 Playground 服务,然后浏览器打开 http://127.0.0.1:2021 页面测试。 - - -## 3. 构建 Kusion 命令 - -Kusion 是更上层的工具集合,其核心命令是采用 Go 语言实现,底层和 KCLVM 的交互是通过 `kclvm-go` 包完成。 - -克隆 Kusion 仓库:`git@github.com:KusionStack/kusion.git` - -然后进入 kusion 目录执行 `go run ./cmd/kusionctl` 命令。正常情况可以看到 kusion 命令的帮助信息。 - -## 4. KCLOpenapi - -KCLOpenapi 是 KCL 语言版本的 OpenAPI 工具,仓库地址:`git@github.com:aKusionStack/kcl-openapi.git`。 - -KCLOpenapi 是纯 Go 语言实现的工具,按照正常的 Go 程序构建流行即可。 - -可以通过 `go run ./cmd/swagger -h` 查看命令的帮助信息。 - - -## 5. 
其它 - -KCLVM、KusionCtl 是本地开发需要经常构建的仓库,目前因为开发资源和时间的原因导致开发文档还不够完善,希望社区同学多多反馈共同参与完善。此外还有 VSCode 插件等外围工具的构建,用户可以参考仓库内部实现代码和文档操作。 diff --git a/docs/develop/design/_category_.json b/docs/develop/design/_category_.json deleted file mode 100644 index 7690954d..00000000 --- a/docs/develop/design/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Design and Implementation", - "position": 4 -} diff --git a/docs/develop/design/design.md b/docs/develop/design/design.md deleted file mode 100644 index 824b123d..00000000 --- a/docs/develop/design/design.md +++ /dev/null @@ -1,2 +0,0 @@ -# Design and Implementation - diff --git a/docs/develop/design/kclvm.md b/docs/develop/design/kclvm.md deleted file mode 100644 index 79ccc2d2..00000000 --- a/docs/develop/design/kclvm.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -sidebar_position: 99 ---- - -# KCLVM Architecture - -![](/img/docs/develop/design/kcl-tech-arch.png) - -The implementation of `KCLVM` compiler is driven by many specifications (mainly including KCL language specification, KCL multilingual integration specification, and KCL OpenAPI specification). Besides, KCL is a compiled language, maintaining the same three-stage architecture as the regular programming language compiler, and using LLVM-IR as the intermediate link between KCL and Native/WASM code. - -KCL has the following three core stages: - -* Translation KCL code to LLVM-IR. By parsing the KCL code and traversing the KCL AST, the corresponding LLVM-IR code is generated according to the KCL language specification. -* KCL runtime library integration. KCL runtime library provides runtime KCL value calculation, memory, context management, built-in library and plug-in library support. -* User mode and system mode code linking and execution. Link user mode code and system mode code into a dynamic link library, and finally execute the compiled KCL code through the unified runner module. - -In addition, KCL provides enhanced support for the semantic resolver and plugins: - -* Resolver - * **Static type inference and checking**. Type inference and checking can be performed at compile time to avoid the overhead of type check at runtime, which can be used as the basis for IDE plug-in and semantic API support (such as schema model query, dependency analysis, etc.) - * **Configuration graph unification**. By building and merging the configuration data dependency graph during the compilation process, the final configuration data can be obtained through only a few calculations during the runtime. - * **Semantic dependency graph**. Through the built-in semantic dependency graph, KCL can complete the dependency analysis when the configuration changes, which can improve the end-to-end compilation performance by performing incremental compilation. - * **Schema-centric OOP**. KCL language only retains the syntax of single inheritance, but the schema can mix and reuse the same code fragments through the features such as `mixin` and `protocol`. -* Plugin. We can use Python/Go to write plugin libraries, which mainly include some domain capabilities, such as accessing networks or databases. diff --git a/docs/develop/design/konfig.md b/docs/develop/design/konfig.md deleted file mode 100644 index c949df58..00000000 --- a/docs/develop/design/konfig.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Konfig Dir Struct - -本文主要解释 Konfig 配置大库的目录和代码结构,其中涉及的基本概念解释可见[《模型概览》](/docs/reference/model/overview)。 - -## 1. 整体结构 - -```bash -. 
-├── Makefile # 通过 Makefile 封装常用命令 -├── README.md # 配置大库说明 -├── appops # 应用运维目录,用来放置所有应用的 KCL 运维配置 -│ ├── guestbook-frontend -│ ├── http-echo -│ └── nginx-example -├── base # Kusion Model 模型库 -│ ├── examples # Kusion Model 样例代码 -│ │ ├── monitoring # 监控配置样例 -│ │ ├── native # Kubernetes 资源配置样例 -│ │ ├── provider # 基础资源配置样例 -│ │ └── server # 云原生应用运维配置模型样例 -│ └── pkg -│ ├── kusion_kubernetes # Kubernetes 底层模型库 -│ ├── kusion_models # 核心模型库 -│ ├── kusion_prometheus # Prometheus 底层模型库 -│ └── kusion_provider # 基础资源 底层模型库 -├── hack # 放置一些脚本 -└── kcl.mod # 大库配置文件,通常用来标识大库根目录位置以及大库所需依赖 -``` - -## 2. 核心模型库结构 - -核心模型库一般命名为 kusion_models,主要包含前端模型、后端模型、Mixin、渲染器等,目录结构为: - -```bash -├── commons # 基础资源核心模型库 -├── kube # 云原生资源核心模型库 -│ ├── backend # 后端模型 -│ ├── frontend # 前端模型 -│ │ ├── common # 通用前端模型 -│ │ ├── configmap # ConfigMap 前端模型 -│ │ ├── container # 容器前端模型 -│ │ ├── ingress # Ingress 前端模型 -│ │ ├── resource # 资源规格前端模型 -│ │ ├── secret # Secret 前端模型 -│ │ ├── service # Service 前端模型 -│ │ ├── sidecar # Sidecar 容器前端模型 -│ │ ├── strategy # 策略前端模型 -│ │ ├── volume # Volume 前端模型 -│ │ └── server.k # 云原生应用运维前端模型 -│ ├── metadata # 应用运维的元数据模型 -│ ├── mixins # 统一放置可复用的 Mixin -│ ├── render # 渲染器,把前后端模型联系在一起的桥梁 -│ ├── templates # 静态配置 -│ └── utils # 工具方法 -└── metadata # 通用元数据模型 -``` - -## 3. Project 和 Stack 结构 - -Project 和 Stack 的基本概念可见 [《Project & Stack》](/user_docs/concepts/konfig.md)。 - -Project 在配置大库的应用运维(appops)场景中对应的概念是「应用」,Stack 对应的概念是「环境」,更多映射关系可见[《映射关系》](/user_docs/guides/organizing-projects-stacks/mapping.md)。 - -本节以应用「nginx-example」为例,介绍 Project 和 Stack 在配置大库中的基本目录结构: - -```bash -├── README.md # Project 介绍文件 -├── base # 各环境通用配置 -│ └── base.k # 通用 KCL 配置 -├── dev # 环境特有配置 -│ ├── ci-test # 测试目录 -│ │ ├── settings.yaml # 测试数据 -│ │ └── stdout.golden.yaml # 测试期望结果 -│ ├── kcl.yaml # 多文件编译配置,是 KCL 编译的入口 -│ ├── main.k # 当前环境 KCL 配置 -│ └── stack.yaml # Stack 配置文件 -└── project.yaml # Project 配置文件 -``` diff --git a/docs/develop/kep.md b/docs/develop/kep.md deleted file mode 100644 index aab256f8..00000000 --- a/docs/develop/kep.md +++ /dev/null @@ -1,7 +0,0 @@ -# KEP - -KCL Enhancement Proposals: - -- Example: [KEP-1](https://github.com/KusionStack/KEP/blob/main/keps/KEP-0001.md) -- Repo: https://github.com/KusionStack/KEP - diff --git a/docs/develop/repos.md b/docs/develop/repos.md deleted file mode 100644 index 4e68f552..00000000 --- a/docs/develop/repos.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Repo Struct - -## 1. 主要仓库 - -Kusion 的顶级仓库包含 Kusion 主仓库、Konfig 模型仓库、文档仓库等、IDE 扩展仓库、KCLVM 相关仓库等,关系如下图: - -![](/img/docs/develop/repos/repo-dag-01.png) - -- Kusion 主库:https://github.com/KusionStack/kusion -- Kusion 网站仓库:https://github.com/KusionStack/kusionstack.io -- Kusion 模型库:https://github.com/KusionStack/konfig -- KCLVM 主库:https://github.com/KusionStack/KCLVM -- IDE 扩展仓库:https://github.com/KusionStack/vscode-kcl - -## 2. 文档仓库 - - -文档相关的仓库关系如下: - -![](/img/docs/develop/repos/repo-dag-docs.png) - -文档主要包含网址的文档、相关的案例代码文档、语言规范文档和 Kusion 模型库自带的文档等。 - -- 文档主仓库:https://github.com/KusionStack/kusionstack.io -- 电子书:https://github.com/KusionStack/kusion-in-action-book - -## 3. 
KCLVM 仓库 - -其中 KCLVM 相关仓库是 KCL 配置语言、规范、工具等实现的仓库,其展开的子仓库关系如下: - -![](/img/docs/develop/repos/repo-dag-02.png) - -最上面提供 KCLVM 实现的多语言绑定接口,目前主要提供 Go、Python 等绑定,后续计划提供 Java、NodeJS 等更多的语言绑定。中间部分是 KCL 语言的实现和语言规范部分。此外、还有 KCL 语言的插件和配套的 IDE 插件等。 - -- KCLVM 主库:https://github.com/KusionStack/KCLVM -- kclvm-go:https://github.com/KusionStack/kclvm-go -- KCL 插件:https://github.com/KusionStack/kcl-plugin diff --git a/docs/events/2022/_category_.json b/docs/events/2022/_category_.json deleted file mode 100644 index fd41878b..00000000 --- a/docs/events/2022/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Events 2022", - "position": 1 -} diff --git a/docs/events/2022/glcc.md b/docs/events/2022/glcc.md deleted file mode 100644 index 9c7d1dce..00000000 --- a/docs/events/2022/glcc.md +++ /dev/null @@ -1 +0,0 @@ -# GitLink Code Camp diff --git a/docs/events/2022/kcl_paper.md b/docs/events/2022/kcl_paper.md deleted file mode 100644 index 286e7bf7..00000000 --- a/docs/events/2022/kcl_paper.md +++ /dev/null @@ -1,13 +0,0 @@ -# KCL papers were accepted by SETTA 2022 - -Recently, the KCL paper written by the team members of KusionStack was accepted by SETTA 2022 International Conference. - -![](/img/docs/events/2022/kcl_paper_setta.png) - -The purpose of the Symposium on Dependable Software Engineering (SETTA) symposium is to bring international researchers together to exchange research results and ideas on bridging the gap between formal methods and software engineering. This is for instance reflected by the challenges in applying formal techniques and tools to engineering large-scale systems such as Cyber-Physical Systems (CPS), Internet-of-Things (IoT), Enterprise Systems, Cloud-Based Systems, and so forth. - -The title of the paper "KCL: A Declarative Language for Large scale Configuration and Policy Management". The core contribution of this paper is to propose the KCL declarative language, development mechanism, and consistent workflow. Through the language model and constraint capabilities, we can improve the large-scale efficiency and liberate multi-team collaborative productivity of operational development and operation systematically while ensuring stability for large-scale configuration and policy management. - -In addition, SETTA 2022 will hold an online conference from October 27 to October 28 (Beijing time), at which time the details of KCL papers will be shared and welcome to join [KusionStack Community](https://github.com/KusionStack/community). For details of SETTA 2022 meeting agenda, please refer to https://lcs.ios.ac.cn/setta2022/program.php - -Note: KCL has been open source in Github. Visit https://github.com/KusionStack/KCLVM for more information. diff --git a/docs/events/_category_.json b/docs/events/_category_.json deleted file mode 100644 index 72330713..00000000 --- a/docs/events/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Events", - "position": 0 -} diff --git a/docs/governance/contribute/_category_.json b/docs/governance/contribute/_category_.json deleted file mode 100644 index e2dca064..00000000 --- a/docs/governance/contribute/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Contribution Guide", - "position": 4 -} diff --git a/docs/governance/contribute/contribute-code.md b/docs/governance/contribute/contribute-code.md deleted file mode 100644 index 41cabd69..00000000 --- a/docs/governance/contribute/contribute-code.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -sidebar_position: 2 ---- - -# How to Contribute Code? 
- -欢迎参与 Kusion 共建贡献完善代码、完善代码文档和测试,同时也欢迎通过 Issue 提供反馈。本文主要针对修改和完善已有的代码,如果是要增加 Konfig 新模型请先通过 Issue 或讨论中充分讨论,如果是希望增加 KCL 语言请通过 [KEP](/docs/develop/kep) 流程提交。 - -## 1. 代码和注释中的错别字 - -如果只是修改代码和注释中的错别字,不涉及代码逻辑的调整,那么可以直接在 Github 克隆仓库后直接修改并提交 PR。需要注意的是尽量保持代码风格一致。 - -## 2. 如何贡献 Kusion 模型库代码 - -- 先确保本地测试环境正常 -- 修改代码并补充测试 -- 本地测试通过后提交 PR - -## 3. 如何贡献 KusionCtl 代码 - -- 先确保本地测试环境正常 -- 修改代码并补充测试 -- 本地测试通过后提交 PR - -## 4. 如何贡献 KCLVM 代码 - -- 先确保本地测试环境正常 -- 修改代码并补充测试 -- 本地测试通过后提交 PR - -## 5. 如何贡献 VS Code 插件代码 - -请参考 VS Code 插件仓库的相关文档 - -## 6. 开发流程相关代码 - -欢迎通过 Issue 和讨论组讨论。 diff --git a/docs/governance/contribute/contribute-docs.md b/docs/governance/contribute/contribute-docs.md deleted file mode 100644 index 76c664b7..00000000 --- a/docs/governance/contribute/contribute-docs.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -sidebar_position: 1 ---- - -# How to Contribute Document? - -本文主要针对已有的文档做局部修改。如果是投稿博客文章、添加新的文档或者调整文档目录结构请先联系团队成员。 - -Kusion 文档分为用户指南、开发文档、内部文档、参考手册和博客文章等,他们的区别如下: - -- 用户指南:对应使用文档,是让用户以最小的代价快速使用 Kusion 工具完整工作,不要涉及太多的内部原理和实现 -- 开发文档:内部是怎么实现的,主要针对希望了解 Kusion 原理和参与贡献和开发的同学 -- 内部文档:针对企业用户的一些内部场景定制的文档 -- 参考手册:Kusion 工具、KCL 配置语言、模型库 等全部特性的文档,内容覆盖最广但比较琐碎 -- 博客文章:没有特别的限制,可以是针对某些具体的场景、某些技术点或者是整体发展展望等分享文章 - -在贡献不同类型的文档时,最好能够结合上面的定位对不同的内容做一些适当的裁剪,给读者最佳体验。 - -## 1. 基本规范 - -- 除标题外,内部小标题尽量带编号,便于阅读 -- 工具自动输出的文档需要由到源代码的链接,小标题可以不带编号 -- 尽量不要贴大段的代码(30行以内),代码最好给出文字解释和对应的参考链接 -- 有图有真相,但是不推荐过度复杂的架构图 -- 内部链接:采用 [`/docs/user_docs/intro/kusion-intro`](/docs/user_docs/intro/kusion-intro) 绝对路径形式 - -**标点和空格** - -- 在中文的文档中优先使用中文的标点 -- 中文和英文之间需要增加 1 个空格 -- 中文和数字之间需要增加 1 个空格 -- 中文使用全角标点,标点前后均不添加空格 -- 英文内容使用半角标点,标点后面加 1 个空格 -- 链接前后需要保留一个空格,但是段落开头和中文全角标点附近不用添加空格。 - -**图片和资源文件名** - -- 文件名和目录名只能用数字、英文字母、下划线 `_` 和减号 `-` 组成 -- 当前文档的图片放在当前目录的 images 目录下 -- 矢量图片可以通过 [drawio 离线版](https://github.com/jgraph/drawio-desktop/releases) 绘制(并同时提交源文件),以 200% 分辨率导出 png 格式图片 - -## 2. 使用文档内容的基本模式 - -每个使用文档可以看作是一个相对完整的分享或博客文章(参考手册不再此类)。使用文档遵循以下模式组织内容: - -1. 概览:本文希望解决什么问题,达到什么效果,可以先放最终效果截图 -1. 依赖的环境:需要安装什么工具,并给出相关链接 -1. 引入本文构建资源的关系图或架构图 - - 需要用到的 Konfig 模型,给出模型参考页面链接,以及对应的上游原始模型的文档链接 -1. 具体的操作步骤 - - 尽量确保最小化代码,甚至可以刻意隐藏一些干扰代码,同时给出完整代码对应的链接 - - 列出每个步骤命令的概要输出信息,并配以文字描述 -1. 给出测试方式 - - 尽量采用社区通用的方式(比如kube、curl命令、或浏览器)测试 - - 给出测试结果的截图(和开头呼应) -1. 总结和展望 - - 简单回顾当前操作的流程,以及一些可以展开的地方(可以给出一些链接) - -## 3. 测试和提交 PR - -先克隆文档仓库,本地通过 `npm run start` 和 `npm run build` 命令测试查看效果,可以参考 [构建文档](/docs/develop/build-docs)。确保可以正常浏览后提交 PR 即可。 diff --git a/docs/governance/contribute/contribute.md b/docs/governance/contribute/contribute.md deleted file mode 100644 index 46b8a7c1..00000000 --- a/docs/governance/contribute/contribute.md +++ /dev/null @@ -1,4 +0,0 @@ -# Contribute Guide - -If it is the first time to participate in the open source community, you can first check the similar changes in Issue and PR (PullRequest). Then you can fully discuss your own problems with the community or development team classmates, and you can also feedback the problems encountered by creating issues, and then you can provide code patches for related issues. At the beginning, you can start with document improvement and partial code improvement, such as: documentation, Examples, multi-language Binding, etc. At the same time, students who want to participate deeply can contribute to core features such as language functions, language testing, programming frameworks, and various backends. 
- diff --git a/docs/governance/contribute/docs/_category_.json b/docs/governance/contribute/docs/_category_.json deleted file mode 100644 index 88f95e19..00000000 --- a/docs/governance/contribute/docs/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Doc Style", - "position": 4 -} diff --git a/docs/governance/contribute/git-guideline.md b/docs/governance/contribute/git-guideline.md deleted file mode 100644 index ad41a1f2..00000000 --- a/docs/governance/contribute/git-guideline.md +++ /dev/null @@ -1,119 +0,0 @@ -# Git Commit Guide - -本文介绍了 Git 提交变更时需要注意的事项,如果拒绝接受本文的内容会导致提交的变更无法被接受。 - -## 1. 关于 issue - -在提交一个 issue 之前,请先查阅已经关闭的 issue ,也许在关闭的 issue 中已经存在合适的解决方案。 - -如果没有找到合适的方案,我们提供了4种模版在创建 issue 的时候使用。 -- Bug Report : 发现了一个 Bug,可以通过 Bug Report 模版创建 issue 与我们联系。 -- Enhancement : 开发者对工具进行了增强,可以通过 Enhancement 模版创建 issue 来介绍增加的内容。 -- Feature Request : 在使用的过程中想要为工具增加某些新的特性或者功能,可以通过 Feature Request 模版创建 issue 来描述新特性。 -- Ask a Question : 如果有任何的疑问,可以通过 Ask a Question 模版来创建一个 issue 与我们联系。 - -在选择合适的模版后,只需要填写模版上的要求填写的内容即可。如果在创建 issue 的时候发现没有模版,或者模版内容为空,可以通过微信群,钉钉群或者邮件向我们反馈这个问题。 - -## 2. 关于 Git 分支 - -要向 KusionStack 贡献代码,您必须拥有一个 GitHub 帐户,以便您可以将代码推送到您自己的 KusionStack 分支并创建拉取请求。我们推荐参考 [Angular 规范](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines) 为您自己的分支命名。 -推荐的格式如下: - -``` -{type}-{a_short_description} -``` -分支名称主要包括两个字段,并通过 “-” 分割。其中: - - {type} : 当前分支内容的类型。 - - {a_short_description}: 一个简短的描述,介绍这个分支的主要内容。 - -e.g. 张三首先 Fork 仓库到自己账户下,然后创建对应名称 `zhangsan:fix-output-fmt-bug` 的分支(冒号之前是张三的账号),用于修复输出格式化 bug。 - -## 3. 关于 Git Commit -我们参考 [Commitizen](https://github.com/commitizen/cz-cli) 书写 Commit Message。 -``` -注: 如果直接使用 Commitizen 生成 Commit Message,需要注意因为 Commitizen -是开发人员管理 commit 的工具,与项目本身无关联,因此由 Commitizen 生成的中间产物 -(如: node_modules 文件目录)可能没有在项目 .gitignore 文件中。 - -您可以 git add {filename} 选择要提交的文件而忽视中间产物。 -或者您可以向 .gitignore 文件中添加如下内容而自动忽视中间产物: -# commitizen -package.json -package-lock.json -node_modules/* -``` -如果手动编写 Commit Message,我们也建议采用 [Commitizen](https://github.com/commitizen/cz-cli) 的 commit message 格式。 - -``` -{type} ( {component_or_file} ) {a_short_description} - {a_longer_description} - BREAKING CHANGE: {breaking_change_description}. - {linked issue} -``` - -其中主要包括6个字段: - - {type} : 当前 commit 对应的分支的类型。 - - {component_or_file}: 当前 commit 改动的模块或者文件的名称。 - - {a_short_description}: 简短的描述介绍 commit 中的内容。 - - {a_longer_description}: 详细的描述用来介绍 commit 中的内容。 - - {breaking_change_description}: 如果 commit 中包含破环兼容性的改动,需要对兼容性改动产生的影响进行介绍。 - - {linked issue}: 与当前这个 commit 关联的 issue。 - - 其中 {breaking_change_description} 和 {linked issue} 如果 commit 中不包含破坏兼容性的改动和关联的 issue,可以省略。 - - e.g. 张三在分支 `zhangsan:fix-output-fmt-bug` 中创建的 commit。 - ``` - - fix(kclvm-printer): fix an output format bug in kclvm-printer - - There is an output format bug in kclvm-printer because ..., - So, The calling of method "XXX" is replaced by "HHHH"..., - ... - - -- 如果没有破坏兼容性的改动和关联的 issue 可以省略下面内容。 - - BREAKING CHANGE: This change maybe cause ....... - - fix #123 - - ``` - -## 4. 
关于 pull request - -在提交一个 PR 之前,可能需要优先考虑以下几个问题: -- 请先查阅已经关闭的 PR ,也许在已经关闭的 PR 中,可能存在已经完成的解决方案。 -- 我们建议在提交变更之前,提交一个对应的 issue 描述变更中将要解决的问题,并将变更对应的 PR 与 issue 关联。 -- 在向我们提交 PR 之后,请签署 [Contributor License Agreement(CLA)](#cla) ,如果拒绝签署,我们将无法接受 PR。 -- 请确保每次改动都创建了一个新的分支,并根据上文中提到的规范为分支命名。 -- 一次 PR 请不要超过两个 commit ,请将多余的 commit 通过 squash 压缩,并根据上文中提到的规范,编写 commit message 。 -- 我们提供了 [PR 模版](https://github.com/KusionStack/.github/blob/main/.github/PULL_REQUEST_TEMPLATE.md),只需要添加模版中要求的内容即可,如果在创建PR时发现没有模版或者模版内容为空,可以通过微信群,钉钉群或者邮件向我们反馈这个问题。 - -我们建议PR的标题与分支名、commit message 风格保持一致: -``` -{type} ( {component_name_or_file_name} ) :{a_short_description} -``` - -e.g. 张三为分支`fix/zhangsan/fix_output_fmt_bug`创建的PR名称。 -``` -fix(kclvm-printer): fix an output format bug in kclvm-printer. -``` - -## 5. 目前 type 支持的类型 -参考[ Angular 规范](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines),type 支持类型的类型如下: -``` -- feat: -- 添加了新的功能特性。 -- fix: -- 进行了 Bug 的修复。 -- docs: -- 进行了文档部分的修改。 -- style: -- 对代码格式的修改,并不影响代码的功能,如:删除多余空格,代码缩进等。 -- refactor: -- 在不改变代码功能的基础上对代码进行了的重构。 -- perf: -- 对代码进行了性能优化。 -- test: -- 添加或者调整已有的测试用例。 -- build: -- 对构建系统或者外部依赖库进行了调整。 -- ci: -- 调整了 CI 的配置文件或者脚本。 -- chore: -- 对源代码和测试文件之外其他部分的调整。 -- revert: -- 对 commit 进行回滚。 -``` - -## 6. Contributor License Agreement(CLA) - -在第一次向我们提交 PR 之后,在 PR 中的 CLA 检查将会失败并提示签署 CLA。您可以通过自己的账户之间在 PR 回复 "I have read the CLA Document and I hereby sign the CLA" 表示同意签署 CLA,然后手动重启失败的 CLA 检查 Action 即可。当 PR 被成功合并之后将会被锁定不能再修改。 diff --git a/docs/governance/contribute/tasks.md b/docs/governance/contribute/tasks.md deleted file mode 100644 index 46464dde..00000000 --- a/docs/governance/contribute/tasks.md +++ /dev/null @@ -1,38 +0,0 @@ -# Tasks for newbie - -为了帮助新同学更好的参与 Kusion 开源项目,我们将提供一些新手任务参考思路,帮助大家成为 Kusion Contributor!并为有价值的贡献者颁发 Kusion 贡献者证书和小纪念品。 - -## 1. 完善文档 - -文档虽然重要,但是不能和代码脱节。Kusion 推崇的是 XaC 的万物皆码的哲学:我们不仅仅通过 Git 来管理网站的 Markdown 文档,还通过直接提取 KCL 代码的方式产出文档。目前有以下文档依然需要大量完善,大家可以从以下任务开始: - -- 寻找文档中错别字、不准确文档、过时文档,提交 Issue 和修改补丁 -- 给 [Konfig](https://github.com/KusionStack/konfig) 补全模型和属性文档,增加使用的例子 -- 将已有的中文文档翻译为英文文档 -- 补充更多的 Example - -## 2. 增加多语言绑定 - -为了方便更多的语言集成 Kusion 工具,我们需要给给多的通用编程语言提供 SDK 绑定。 - -- 为 Node.js 提供 SDK -- 为 Java 提供 SDK - - -## 3. 增加新的 Konfig 模型 - -- 针对 Konfig 缺少的场景,补充对应的模型和文档 - -## 4. 完善测试 - -测试是代码改进和演化的基本保障,可以尝试为以下模块提供更多的测试: - -- 给 Konfig 补充测试代码 -- 给 Plugin 补充测试代码 -- 给 KCL OpenAPI 工具补充测试代码 -- 给 KCLVM 补充内部实现模块测试代码 -- 给 KusionCtl 内部模块补充测试代码 - -## 5. 挑战性任务 - -- KCL 语言:语言功能、语言测试、编程框架、多种后端 diff --git a/docs/governance/intro/intro.md b/docs/governance/intro/intro.md deleted file mode 100644 index bd008002..00000000 --- a/docs/governance/intro/intro.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -sidebar_position: 1 ---- -# Community - -Welcome to the [KusionStack](/docs/user_docs/intro/kusion-intro) open source community, everyone's participation is the driving force for the healthy growth of all open source projects! There are many ways to participate in open source. Everyone can create Issues or fix bugs, improve documentation or modify code by submitting PR (Pull Request), or open new feature discussions by submitting KEP, or share stories about the process of evangelism and use of Kusion with surrounding friends. 
diff --git a/docs/governance/intro/license.md b/docs/governance/intro/license.md deleted file mode 100644 index f1cc7fd7..00000000 --- a/docs/governance/intro/license.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -sidebar_position: 99 ---- -# License - -Kusion Use [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) License: - -``` - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2022 The Kusion Authors. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -``` diff --git a/docs/governance/intro/roadmap.mdx b/docs/governance/intro/roadmap.mdx deleted file mode 100644 index f13610c2..00000000 --- a/docs/governance/intro/roadmap.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -sidebar_position: 3 ---- - -# Roadmap - -- 2022.5 - Open Source Code - -## KCLVM Roadmap - -TODO - -## Kusion Roadmap - -TODO - -## IDE Extension - -TODO diff --git a/docs/governance/intro/support.md b/docs/governance/intro/support.md deleted file mode 100644 index 551fe349..00000000 --- a/docs/governance/intro/support.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Ask for help - -Kusion has a developer and user community of many enthusiasts. - -On this page, we list the Kusion-related communities you can participate in; see other pages in this section for additional online and offline learning materials. - -Before joining the Kusion community, please read the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct/) , which is expected to be followed by all community members. - -## Discussion - -- Submit an issue on Github -- Communicate in the Github discussion group -- Get the latest status through official website, Github, Twitter, Wechat and other accounts - -## New Feature - -Please try to avoid submitting pull requests for new features, we may already have someone working on them, or maybe this feature is already part of our future plans. In conclusion, please contact us before submitting new features! - -## Other - -Please follow Kusion related projects and blogs. - - diff --git a/docs/governance/intro/team.md b/docs/governance/intro/team.md deleted file mode 100644 index ec76442c..00000000 --- a/docs/governance/intro/team.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Team - -## 1. Core Team - -The Kusion Core team is committed to the development and improvement of core functions such as KCL language, KusionCtl, Kusion model library, IDE plug-ins, and the Kusion documentation website. Lists the current members of the Kusion team in alphabetical order. - -## 2. Honorary - -The development of Kusion cannot be separated from the people who have contributed so much to it. Thank you. - -## 3. Student - -Some students helped Kusion during school and internships, and contributed excellent features including plugin option verification, migration tools, and Bootstrap themes. - -## 4. 
Special - diff --git a/docs/governance/release-policy/_category_.json b/docs/governance/release-policy/_category_.json deleted file mode 100644 index bc39d651..00000000 --- a/docs/governance/release-policy/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Release Policy", - "position": 3 -} diff --git a/docs/governance/release-policy/index.md b/docs/governance/release-policy/index.md deleted file mode 100644 index 602cc23c..00000000 --- a/docs/governance/release-policy/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Release Policy - -The release policy defines the semantics of the release, the release process, etc. In order to maximize the concurrent development process, Kusion and KCLVM, etc. adopt independent release strategies. diff --git a/docs/governance/release-policy/kcl_plugin.md b/docs/governance/release-policy/kcl_plugin.md deleted file mode 100644 index eeba36d7..00000000 --- a/docs/governance/release-policy/kcl_plugin.md +++ /dev/null @@ -1,25 +0,0 @@ -# KCL Plugin Release Policy - -KCL 插件的设计初衷只是为了扩展 KCL 语言的功能,其定位并不是完全复刻通用的编程语言全部生态。因此 KCL 插件刻意做了一些必要的限制:首先插件之间不能相互导入;其次在同一个模块中插件不能同名;最后Python实现的插件指南使用标准库和插件框架提供的功能。 - -## 1. 每个插件独立维护版本 - -基于以上的设计背景,同一个 kcl.mod 定义的模块中每个插件是相互独立的,插件之间和其依赖均不会出现依赖冲突的问题。因此,每个 KCL 插件可以独立发布独立维护。 - -## 2. kcl.mod 指定依赖的插件信息 - -kcl.mod 中 `[kcl_plugin]` 字段标注插件信息: - -```toml -# kcl.mod - -[kcl_plugin] -hello = { git = "https://github.com/KusionStack/kcl-plugin.git", path = "hello", branch = "master" } -project_context = { git = "https://github.com/KusionStack/kcl-plugin.git", path = "project_context", version = "0.1.0"} -utils = { path = "${PATH}/plugins/utils" } -``` - -## 3. 插件对 KCLVM 的版本依赖 - -插件本身可以指定依赖的 KCL 语言的版本,相关工具做检查。 - diff --git a/docs/governance/release-policy/kclvm.md b/docs/governance/release-policy/kclvm.md deleted file mode 100644 index 9d9738f6..00000000 --- a/docs/governance/release-policy/kclvm.md +++ /dev/null @@ -1,23 +0,0 @@ -# KCLVM Release Policy - -KCLVM 是 KCL 语言的实现,开源之后开发团队希望采用 [语义化版本](https://semver.org/lang/zh-CN/) 来简化管理。版本格式:主版本号.次版本号.修订号。版本号递增规则如下:主版本号对应不兼容的 API 修改,次版本号对应向下兼容的功能性新增,修订号对应向下兼容的问题修正。其中主版本号和次版本号均包含了不一样的特性统一称之为大版本,补丁修复称之为小版本。 - -总体目标是每个季度发布一个特性增强的大版本,并支持最近发布的两个大版本,根据需要不定期发布其他版本的修订。 - -## 1. 发布流程 - -发布流程如下: - -- master 主干开发,每日产出一个 Nightly 版本,CI 系统进行测试 -- beta 测试分支,经过 6 周后从 Nightly 版本产出一个 Beta 版本 -- stable 稳定分支,经过 6 周后从 Beta 版本产出一个 Stable 版本 -- release-branch.kcl-x.y 发布分支,每个季度从 Stable 版本产出一个 rc 候选版本,并最终发布 -- release-branch.kcl-x.y 分支的 BUG 修复需要合并回 master,然后逐步同步到 beta、stable 分支 - -其中 stable、beta 只是延迟的 master 分支,release-branch.kcl-x.y 发布后将和 master 保存独立。 - -如果本次发布失败,则顺延到下个发布周期。 - -## 2. 发布维护 - -发布次要版本以解决一个或多个没有解决方法的关键问题(通常与稳定性或安全性有关)。版本中包含的唯一代码更改是针对特定关键问题的修复。重要的仅文档更改和安全测试更新也可能包括在内,但仅此而已。一旦 KCL 1.x+2 发布,解决 KCL 1.x 的非安全问题的次要版本就会停止更新。解决 KCL 1.x 安全问题的次要版本在 KCL 1.x+2 发布后停止。 diff --git a/docs/governance/release-policy/kusion.md b/docs/governance/release-policy/kusion.md deleted file mode 100644 index dce9d2ea..00000000 --- a/docs/governance/release-policy/kusion.md +++ /dev/null @@ -1,20 +0,0 @@ -# kusionctl Release Policy - -kusionctl 是基于 KCL 的 DevOps 工具链,开源之后开发团队希望采用 [语义化版本](https://semver.org/lang/zh-CN/) 来简化管理。版本格式:主版本号.次版本号.修订号。版本号递增规则如下:主版本号对应不兼容的 API 修改,次版本号对应向下兼容的功能性新增,修订号对应向下兼容的问题修正。其中主版本号和次版本号均包含了不一样的特性统一称之为大版本,补丁修复称之为小版本。 - -总体目标是每个季度发布一个特性增强的大版本,并支持最近发布的两个大版本,根据需要不定期发布其他版本的修订。 - -## 1. 发布流程 - -发布流程如下: - -- master 主干开发,每日产出一个 Nightly 版本,CI 系统进行测试 -- beta 测试分支,经过 3 周后从 Nightly 版本产出一个 Beta 版本 -- stable 稳定分支,经过 3 周后从 Beta 版本产出一个 Stable 版本 -- rc 发布分支,每个季度从 Stable 版本产出一个 rc 候选版本,并最终发布 - -如果本次发布失败,则顺延到下个发布周期。 - -## 2. 
发布维护 - -发布次要版本以解决一个或多个没有解决方法的关键问题(通常与稳定性或安全性有关)。版本中包含的唯一代码更改是针对特定关键问题的修复。重要的仅文档更改和安全测试更新也可能包括在内,但仅此而已。一旦 Kusion 1.x+2 发布,解决 Kusion 1.x 的非安全问题的次要版本就会停止更新。解决 Kusion 1.x 安全问题的次要版本在 Kusion 1.x+2 发布后停止。 diff --git a/docs/karpor/1-getting-started/1-overview.mdx b/docs/karpor/1-getting-started/1-overview.mdx new file mode 100644 index 00000000..8c83972f --- /dev/null +++ b/docs/karpor/1-getting-started/1-overview.mdx @@ -0,0 +1,419 @@ +--- +id: overview +title: Overview +slug: / +--- + +import { + AiOutlineArrowRight, + AiFillCheckCircle, + AiFillCloseCircle, +} from "react-icons/ai"; +import logoImg from "@site/static/karpor/assets/logo/logo-full.png"; +import searchImg from "@site/static/karpor/assets/overview/search.png"; +import insightImg from "@site/static/karpor/assets/overview/insight.png"; +import visionImg from "@site/static/karpor/assets/overview/vision.png"; +import comingSoonImg from "@site/static/karpor/assets/misc/coming-soon.jpeg"; +import KarporButton from "@site/src/components/KarporButton"; +import GithubStar from "@site/src/components/GithubStars"; +import ReactPlayer from "react-player"; +import Typed from "typed.js"; +import clsx from "clsx"; + +export const Feature = ({ imgSrc, title, description, reverse }) => { + const reverseStyle = reverse ? { flexDirection: "row-reverse" } : {}; + return ( + <> +

{title}

+
+
+ +
+
+ {description} +
+
+ + ); +}; + +export const Content = () => { + const karporVsOthers = [ + { + label: "User Interface", + karpor: true, + kubernetesDashboard: true, + labelDesc: "", + }, + { + label: "Multi-Cluster", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Ability to connect to multiple clusters simultaneously", + }, + { + label: "Aggregated Resource View", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Human-friendly view for resources", + }, + { + label: "Security Compliance", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Automatic scanning risk, assessing health score", + }, + { + label: "Resource Relationship Topology", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Insight into the context of resources", + }, + ]; + const h2Style = { + paddingBottom: "14px", + borderBottom: "2px solid #f1f1f1", + fontSize: 28, + }; + const flexDirectionStyle = { + display: "flex", + flexDirection: "column", + alignItems: "center", + }; + // Setup typed animation + const el = React.useRef(null); + React.useEffect(() => { + const typed = new Typed(el.current, { + strings: [ + "Locate resources, for Developers.", + "Explore cluster insights, for Admins.", + "Connect multi-clusters, for Platforms.", + ], + typeSpeed: 40, + backDelay: 1500, + loop: true, + }); + return () => { + // Destroy Typed instance during cleanup to stop animation + typed.destroy(); + }; + }, []); + return ( + <> +
+
+ +
+
+ + +
+
+ Intelligence for Kubernetes ✨ +
+
+ +
+
+
+
+
+

📖 What is Karpor?

+
+ Karpor is Intelligence for Kubernetes. It brings advanced{" "} + 🔍 Search, 💡 Insight and ✨ AI to Kubernetes. It is + essentially a Kubernetes Visualization Tool. With Karpor, you can gain crucial + visibility into your Kubernetes clusters across any cloud.
+
+ We hope to become a{" "} + + small and beautiful, vendor-neutral, developer-friendly, + community-driven + {" "} + open-source project! 🚀 +
+
+
+
+ +
+
+
+
+

💡 Why Karpor?

+
+ + ⚡️ Automatic Syncing +
+ Automatically synchronize your resources across any clusters + managed by the multi-cloud platform. +
+
+ 🔍 Powerful, flexible queries +
+ Effectively retrieve and locate the resources you are looking for across multiple clusters, + in a quick and easy way. + + } + />
+ + 🔒 Compliance Governance +
+ Understand your compliance status across multiple clusters and + compliance standards. +
+
+ 📊 Resource Topology +
+ Logical and topological views of relevant resources within their + operational context. +
+
+ 📉 Cost Optimization +
+ Coming soon. + + } + /> +
+ + 💬 Natural Language Operations +
+ Interact with Kubernetes using plain language for more + intuitive operations. +
+
+ 📦 Contextual AI Responses +
+ Get smart, contextual assistance that understands your needs. +
+
+ 🤖 AIOps for Kubernetes +
+ Automate and optimize Kubernetes management with AI-powered + insights. + + } + /> +
+
+
+
+
+

🌈 Our Vision

+
+ The increasing complexity of the Kubernetes ecosystem is an + undeniable trend, and it is becoming more and more difficult to + manage. This complexity not only entails a heavier burden on + operations and maintenance but also slows down the adoption of + new technologies by users, limiting their ability to fully + leverage the potential of Kubernetes.
+
+ We want Karpor to focus on 🔍 search, 📊 insights, + and ✨ AI, to cut through the increasingly complex maze of + Kubernetes and achieve the following value proposition:
+
+
+ +
+
+
+

🙌 Karpor vs. Kubernetes Dashboard

+
+
+
+
+ Karpor +
+
+ + Kubernetes Dashboard + +
+
+ {karporVsOthers?.map((item) => { + return ( +
+
+
{item?.label}
+ {item?.labelDesc && ( +
{item?.labelDesc}
+ )} +
+
+ {item?.karpor ? ( + + ) : ( + + )} +
+
+ {item?.kubernetesDashboard ? ( + + ) : ( + + )} +
+
+ ); + })} +
+

🎖️ Open Source Contributors

+
+

Thanks all! 🍻

+

+ Follow{" "} + Contributing Guide, + come and join us! 👇 +

+ +
+

👉 Next Step

+
+ +
+ + ); +}; + + diff --git a/docs/karpor/1-getting-started/2-installation.md b/docs/karpor/1-getting-started/2-installation.md new file mode 100644 index 00000000..9793f7d3 --- /dev/null +++ b/docs/karpor/1-getting-started/2-installation.md @@ -0,0 +1,172 @@ +--- +title: Installation +--- + +## Install with helm + +If you have a kubernetes cluster, helm is the recommended installation method. + +The following tutorial will guide you to install Karpor using Helm, which will install the chart with the release name `karpor-release` in namespace `karpor`. + +### Prerequisites + +* Helm v3+ +* A Kubernetes Cluster (The simplest way is to deploy a kubernetes cluster locally using `kind` or `minikube`) + +### Remote Installation + +First, add the karpor chart repo to your local repository. + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +``` + +Then you can use the following command to install the latest version of Karpor. + +```shell +helm install karpor-release kusionstack/karpor +``` + +![Install](./assets/2-installation/install.gif) + +**Note** that installing this chart directly means it will use the [default template values](https://github.com/KusionStack/charts/blob/master/charts/karpor/values.yaml) for Karpor. + +You may have to set your specific configurations if it is deployed into a production cluster, or you want to customize the chart configuration, such as `resources`, `replicas`, `port` etc. + +All configurable parameters of the Karpor chart are detailed [here](#chart-parameters). + +```shell +helm install karpor-release kusionstack/karpor --set server.replicas=3 --set syncer.port=7654 +``` + +### Search all available versions + +You can use the following command to view all installable chart versions. + +```shell +helm repo update +helm search repo kusionstack/karpor --versions +``` + +### Upgrade specified version + +You can specify the version to be upgraded through the `--version`. + +```shell +# Upgrade to the latest version. +helm upgrade karpor-release kusionstack/karpor + +# Upgrade to the specified version. +helm upgrade karpor-release kusionstack/karpor --version 1.2.3 +``` + +### Local Installation + +If you have problem connecting to [https://kusionstack.github.io/charts/](https://kusionstack.github.io/charts/) in production, you may need to manually download the chart from [here](https://github.com/KusionStack/charts) and use it to install or upgrade locally. + +```shell +git clone https://github.com/KusionStack/charts.git +helm install karpor-release charts/karpor +helm upgrade karpor-release charts/karpor +``` + +### Uninstall + +To uninstall/delete the `karpor-release` helm release in namespace `karpor`: + +```shell +helm uninstall karpor-release +``` + +### Image Registry Proxy for China + +If you are in China and have problem to pull image from official DockerHub, you can use the registry proxy: + +```shell +helm install karpor-release kusionstack/karpor --set registryProxy=docker.m.daocloud.io +``` + +**NOTE**: The above is just an example, you can replace the value of `registryProxy` as needed. + +### Chart Parameters + +The following table lists the configurable parameters of the chart and their default values. + +#### General Parameters + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| namespace | string | `"karpor"` | Which namespace to be deployed. | +| namespaceEnabled | bool | `true` | Whether to generate namespace. 
| registryProxy | string | `""` | Image registry proxy that will be used as the prefix for all component images. |

#### Global Parameters

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| global.image.imagePullPolicy | string | `"IfNotPresent"` | Image pull policy to be applied to all Karpor components. |

#### Karpor Server

The Karpor Server component is the main backend server. It is itself an `apiserver`, and it also provides a `/rest-api` endpoint to serve the Dashboard.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| server.image.repo | string | `"kusionstack/karpor"` | Repository for Karpor server image. |
| server.image.tag | string | `""` | Tag for Karpor server image. Defaults to the chart's appVersion if not specified. |
| server.name | string | `"karpor-server"` | Component name for karpor server. |
| server.port | int | `7443` | Port for karpor server. |
| server.replicas | int | `1` | The number of karpor server pods to run. |
| server.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Resource limits and requests for the karpor server pods. |
| server.serviceType | string | `"ClusterIP"` | Service type for the karpor server. The available values are `ClusterIP`, `NodePort` and `LoadBalancer`. |

#### Karpor Syncer

The Karpor Syncer component is an independent server that synchronizes cluster resources in real time.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| syncer.image.repo | string | `"kusionstack/karpor"` | Repository for Karpor syncer image. |
| syncer.image.tag | string | `""` | Tag for Karpor syncer image. Defaults to the chart's appVersion if not specified. |
| syncer.name | string | `"karpor-syncer"` | Component name for Karpor syncer. |
| syncer.port | int | `7443` | Port for Karpor syncer. |
| syncer.replicas | int | `1` | The number of karpor syncer pods to run. |
| syncer.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Resource limits and requests for the karpor syncer pods. |

#### ElasticSearch

The ElasticSearch component stores the synchronized resources and user data.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| elasticsearch.image.repo | string | `"docker.elastic.co/elasticsearch/elasticsearch"` | Repository for ElasticSearch image. |
| elasticsearch.image.tag | string | `"8.6.2"` | Specific tag for ElasticSearch image. |
| elasticsearch.name | string | `"elasticsearch"` | Component name for ElasticSearch. |
| elasticsearch.port | int | `9200` | Port for ElasticSearch. |
| elasticsearch.replicas | int | `1` | The number of ElasticSearch pods to run. |
| elasticsearch.resources | object | `{"limits":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"},"requests":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"}}` | Resource limits and requests for the karpor elasticsearch pods. |

#### ETCD

The ETCD component is the storage backend for the Karpor Server in its role as an `apiserver`.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| etcd.image.repo | string | `"quay.io/coreos/etcd"` | Repository for ETCD image. |
| etcd.image.tag | string | `"v3.5.11"` | Specific tag for ETCD image. |
| etcd.name | string | `"etcd"` | Component name for ETCD. |
| +| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | | +| etcd.persistence.size | string | `"10Gi"` | | +| etcd.port | int | `2379` | Port for ETCD. | +| etcd.replicas | int | `1` | The number of etcd pods to run. | +| etcd.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Resource limits and requests for the karpor etcd pods. | + +#### Job + +This one-time job is used to generate root certificates and some preliminary work. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| job.image.repo | string | `"kusionstack/karpor"` | Repository for the Job image. | +| job.image.tag | string | `""` | Tag for Karpor image. Defaults to the chart's appVersion if not specified. | diff --git a/docs/karpor/1-getting-started/3-quick-start.md b/docs/karpor/1-getting-started/3-quick-start.md new file mode 100644 index 00000000..d0179bb2 --- /dev/null +++ b/docs/karpor/1-getting-started/3-quick-start.md @@ -0,0 +1,145 @@ +--- +title: Quick Start +--- +## Prerequisites + +* Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed. +* Ensure [helm](https://helm.sh/docs/intro/install/) is installed. +* If you do not have a ready-made cluster, you still need a [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation/). + +## Create Cluster (Optional) + +First, if you do not have a ready-made cluster, you need to create a kubernetes cluster in your local environment with the `kind` tool. Follow these steps: + +1. Create a cluster. You can create a cluster named `demo-cluster` using the following command: + ```shell + kind create cluster --name demo-cluster + ``` + + This will create a new Kubernetes cluster in your local Docker environment. Wait for a moment until the cluster creation is complete. +2. Verify that the cluster is running properly by executing the command: + ```shell + kubectl cluster-info + ``` + + If everything is set up correctly, you'll see information about your Kubernetes cluster. + +## Installation + +To install Karpor, execute the following command in your terminal: + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +For more installation details, please refer to the [Installation Documentation](2-installation.md). + +![Install](./assets/2-installation/install.gif) + +## Access Karpor Dashboard + +1. Run the following command to access the Karpor service running in the cluster: + ```shell + kubectl -n karpor port-forward service/karpor-server 7443:7443 + ``` + + After executing this command, if you access port 7443 on your local machine, the traffic will be forwarded to port 7443 of the karpor-server service in the Kubernetes cluster. +2. Open your browser and enter the following URL: + ```shell + https://127.0.0.1:7443 + ``` + +This will open the Karpor Web interface. 👇 + +![Open in Browser](./assets/2-installation/open-in-browser.gif) + +Congratulations! 🎉 You have successfully installed Karpor. Now you can start using Karpor to explore and gain insights into resources across multiple clusters. + +## Create Access Token + +Before registering clusters, you need to create an access token to log in to the Karpor Web interface. Here are the brief steps to create a token: + +1. 
Export the KubeConfig of the Hub Cluster: + +```shell +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +2. Create ServiceAccount and ClusterRoleBinding: + +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +``` + +3. Create token: + +```shell +kubectl create token karpor-admin --duration=1000h +``` + +Copy the generated token, which will be used later to log in to the Karpor Web interface. + +For detailed instructions on creating tokens, please refer to the [How to Create Token](../3-user-guide/1-how-to-create-token.md) documentation. + +## Register Cluster + +To register a new cluster with Karpor, follow these steps: + +1. Log in to the Karpor Web interface using the token created in the previous step. +2. Navigate to the `Cluster Management` section in the Karpor UI. +3. Click on the `Register Cluster` button. +4. Follow the on-screen instructions to complete the registration process. + +5. When registering a cluster, please note the following: + + - The cluster name must be unique and cannot be changed once created. + - Ensure that there is network connectivity between the server address (target cluster address) in the uploaded cluster certificate and Karpor. + - If you deployed Karpor in a local cluster and want to register that local cluster, you need to modify the server address in the cluster certificate to the internal cluster address `https://kubernetes.default.svc.cluster.local:443` to ensure that Karpor can directly access the target cluster. + - If you want to register an EKS cluster, additional configuration of the KubeConfig is required, including adding the `env`, `interactiveMode`, and `provideClusterInfo` fields. For detailed steps, please refer to the "Registering an EKS Cluster" section in the [Multi-cluster Management](../3-user-guide/2-multi-cluster-management.md) documentation. + +6. After completing the above steps, click the `Validate and Submit` button. + +An example of the `Register Cluster` page can be found in the image below: + +![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) + +For a more detailed explanation of the registration process, refer to the [Multi-cluster management](../3-user-guide/2-multi-cluster-management.md) Documentation. + +## Search Resources + +Karpor provides a powerful search feature that allows you to quickly find resources across the registered clusters. To use this feature: + +1. Go to the `Search` page within the Karpor UI. +2. Enter the search criteria for the resources you are looking for. + +Here is an example of the `Search` page: + +![](/karpor/assets/search/search-auto-complete.png) +![](/karpor/assets/search/search-result.png) + +To learn more about the search capabilities and how to use them effectively, check out the [Search Methods Documentation](../5-references/3-search-methods.md). + +## Gain Insight into Resources + +By clicking on a result from your search, you can delve into the `Insight` page, where you'll be able to investigate risks related to the resource, see a topological view with its relevant resources, and examine its detailed information. 
+ +Here are examples for what you can find on the Insight page: + +![](/karpor/assets/insight/insight-home.png) +![](/karpor/assets/insight/insight-single-issue.png) +![](/karpor/assets/insight/insight-topology.png) + +## Conclusion + +Please note that this guide only provides a quick start for Karpor, and you may need to refer to additional documentations and resources to configure and use other features. + +If you have any questions or concerns, check out the official documentation of Karpor or seek relevant support. + +## Next Step + +- Learn Karpor's [Architecture](../concepts/architecture) and [Glossary](../concepts/glossary). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. diff --git a/docs/karpor/1-getting-started/_category_.json b/docs/karpor/1-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/docs/karpor/1-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/docs/karpor/1-getting-started/assets/2-installation/install.gif b/docs/karpor/1-getting-started/assets/2-installation/install.gif new file mode 100644 index 00000000..68889793 Binary files /dev/null and b/docs/karpor/1-getting-started/assets/2-installation/install.gif differ diff --git a/docs/karpor/1-getting-started/assets/2-installation/open-in-browser.gif b/docs/karpor/1-getting-started/assets/2-installation/open-in-browser.gif new file mode 100644 index 00000000..00adfb18 Binary files /dev/null and b/docs/karpor/1-getting-started/assets/2-installation/open-in-browser.gif differ diff --git a/docs/karpor/2-concepts/1-architecture.md b/docs/karpor/2-concepts/1-architecture.md new file mode 100644 index 00000000..c53e8491 --- /dev/null +++ b/docs/karpor/2-concepts/1-architecture.md @@ -0,0 +1,24 @@ +--- +title: Architecture +--- +![](assets/1-architecture/architecture.png) + +## Components + +- `Dashboard`: Web UI for Karpor. +- `Server`: Main Backend Server for Karpor. +- `Syncer`: Independent Server to synchronize cluster resources in real-time. +- `Storage`: Storage Backend to store the synchronized resources and user data. + +## How Karpor Works + +1. After installation, users can register clusters of interest into Karpor. +2. The Syncer runs and automatically synchronizes the resources of interest from the cluster to Storage. It also ensures the real-time changes to the resources are automatically sync-ed to Karpor Storage. +3. When a user wishes to locate specific resource(s), a search query can be typed into the search box in the Dashboard. The Dashboard interacts with the search endpoint of the Server. The search module within the Server parses the search query, searches for relevant resources in Storage, and returns the results to the Dashboard. +4. Upon clicking a search result, the user is directed to a resource insight page. The Dashboard calls the insight endpoint of the Server, where the Server's insight module performs a static scan of the resource, generates issue reports, and locates its relevant resources to draw a resource topology map with all of its parents and children. +5. The insight page also applies to groups of resources, such as all resources in a cluster, a Group-Version-Kind combination, a namespace or a custom-defined resource group. + +## Next Step + +- Learn Karpor's [Glossary](../concepts/glossary). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. 
diff --git a/docs/karpor/2-concepts/3-glossary.md b/docs/karpor/2-concepts/3-glossary.md new file mode 100644 index 00000000..780e374b --- /dev/null +++ b/docs/karpor/2-concepts/3-glossary.md @@ -0,0 +1,57 @@ +--- +title: Glossary +--- +## Cluster + +Equivalent to the concept of a cluster in `Kubernetes`, such as a cluster named `democluster`. + +`Karpor` can manage multiple clusters, including cluster registration, certificate rotation, generating and viewing insights, and other operations through a Dashboard. It also supports accessing any managed cluster using a unified certificate issued by `Karpor` through command-line tools such as `kubectl` and `kubectx`. + +For more details, please refer to the best practice: [One Pass with Proxy](../3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md). + +## Hub Cluster + +Cluster that manages other clusters. Since Karpor itself is also a Kubernetes Apiserver, we have registered some custom resources in this special cluster to manage cluster metadata, resource recycling strategies, and so on. We refer to this special cluster as the Hub Cluster, distinguishing it from the hosted user clusters. + +## Managed Cluster + +It generally refers to the clusters managed by the Hub Cluster, which are typically the user clusters hosted in Karpor. + +## Resource + +Equivalent to the resource concept in `Kubernetes`, such as a `Deployment` named `mockDeployment`. + +`Karpor` performs real-time synchronization, search, and insights on resources within the managed clusters. A resource is the object with the smallest granularity for searching and insights in `Karpor`. + +## Resource Group + +**A resource group is a logical organizational structure** used to combine related `Kubernetes` resources for a more intuitive view, search, and insight experience. For example, an `Application` named `mockapp` resource group can be created to includes a `Namespace`, a `Deployment`, and multiple `Pods`, all with a specific label such as `app.kubernetes.io/name: mockapp`. + +## Resource Group Rule + +**A resource group rule is a set of conditions** that groups specific resources into appropriate resource groups. These rules aim to organize resources into logical units based on properties such as `annotations`, `labels`, `namespace`, and so on. For example, to define an Application resource group rule, you can specify the `app.kubernetes.io/name` annotation as a grouping condition. + +`Karpor` has a preset resource group rule - `Namespace` - as well as custom resource group rules. + +![](assets/3-glossary/image-20240326171327110.png) + +## Topology + +In `Karpor`, the topology refers to the **relations and dependencies between relevant resources within a given resource group**. Viewing and understanding the interior structure of a resource group is made easier with a visual topology diagram, which is helpful for troubleshooting and locating issues. + +## Audit + +Audit refers to **performing a compliance scan on all resources within a given resource group**. The goal is to help users discover potential risks. The scanning tools and rules used for the audit are currently built into the system, but we will support customization in the future. + +## Issue + +**The output of the audit is referred to as issues**. If there are no problems with the scanned object, the audit results will be empty. 
Otherwise, all identified risks will be categorized by their risk level and displayed, including descriptions of each risk, associated resources, etc., guiding users to fix the issues, ensure the security and compliance of the cluster resources. + +## Score + +The score is used to reflect the **overall health status of a resource group or a resource**, reminding users to take timely adjustments and measures. The health score is calculated based on the resource group's audit results. The factors that impact the score include: **risk level**, **number of risks**, and **total number of resources**. + +## Next Step + +- Learn Karpor's [Architecture](../concepts/architecture). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. diff --git a/docs/karpor/2-concepts/_category_.json b/docs/karpor/2-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/docs/karpor/2-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/docs/karpor/2-concepts/assets/1-architecture/architecture.png b/docs/karpor/2-concepts/assets/1-architecture/architecture.png new file mode 100644 index 00000000..afec9346 Binary files /dev/null and b/docs/karpor/2-concepts/assets/1-architecture/architecture.png differ diff --git a/docs/karpor/2-concepts/assets/3-glossary/image-20240326171327110.png b/docs/karpor/2-concepts/assets/3-glossary/image-20240326171327110.png new file mode 100644 index 00000000..f5673eb8 Binary files /dev/null and b/docs/karpor/2-concepts/assets/3-glossary/image-20240326171327110.png differ diff --git a/docs/karpor/3-user-guide/1-how-to-create-token.md b/docs/karpor/3-user-guide/1-how-to-create-token.md new file mode 100644 index 00000000..3e2ef484 --- /dev/null +++ b/docs/karpor/3-user-guide/1-how-to-create-token.md @@ -0,0 +1,83 @@ +--- +title: How to Create Token +--- +In this document, you will learn how to use a token to access the Karpor dashboard. + +[Hub Cluster](../2-concepts/3-glossary.md#hub-cluster) adopts the same Role-Based Access Control (RBAC) mechanism as Kubernetes. This means that in order to access the Hub Cluster, users need to create a ClusterRole, ServiceAccount, and the corresponding ClusterRoleBinding in the Hub Cluster to bind the two. To enhance user experience, we have preset two ClusterRoles: karpor-admin and karpor-guest. The karpor-admin role has permissions to perform all actions on the dashboard, including but not limited to adding or deleting clusters, creating resource groups, etc., while the karpor-guest role is limited to view-only actions on the dashboard. As users gain a deeper understanding of Karpor, they can create additional ClusterRoles based on their needs to achieve more granular permission management. + +## Exporting the KubeConfig for the Hub Cluster + +Since the Hub Cluster requires a KubeConfig for authentication, you can export the KubeConfig to access the Hub Cluster using the following command. +```shell +# The following operation is performed in the Kubernetes cluster where Karpor is installed +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +**Note**: Please ensure that the server address in the Hub Cluster's KubeConfig is accessible from your local machine. The default address is the internal cluster address (https://karpor-server.karpor.svc:7443), which cannot be directly connected from local. 
If you deployed Karpor in a local cluster, you need to forward the karpor-server service to local port 7443 and change the server address to `https://127.0.0.1:7443`. + +You can use the following sed command to change the access address in the Hub Cluster certificate to the local address: + +For MacOS/BSD systems (need an extra `''` after `-i`): +```shell +sed -i '' 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +For Linux/GNU systems (only `-i`): +```shell +sed -i 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +For Windows systems: +Please modify the server address manually in the kubeconfig file. + +## Forward the Services of the Hub Cluster to the Local Machine + +In this section, we assume that you have deployed Karpor in a local cluster. + +As mentioned in the previous section, to access the Hub Cluster locally, you need to forward the karpor-server service to your local machine. If you have used other methods for forwarding, you can skip this step. Here, we will use a simple port-forwarding method. Open another terminal and run: + +```shell +# The following operation is performed in the Kubernetes cluster where Karpor is installed +kubectl -n karpor port-forward svc/karpor-server 7443:7443 +``` + +## Create ServiceAccount and ClusterRoleBinding for Your Users + +This section will guide you on how to create karpor-admin and karpor-guest users in the Hub Cluster and assign the corresponding ClusterRoleBinding to them. Here are the specific steps: + +First, specify the target cluster for kubectl to connect to as the Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +Then, we will create two common identities: administrator (karpor-admin) and guest (karpor-guest). This process includes creating ServiceAccounts and binding them to the corresponding ClusterRoles: + +```shell +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +kubectl create serviceaccount karpor-guest +kubectl create clusterrolebinding karpor-guest --clusterrole=karpor-guest --serviceaccount=default:karpor-guest +``` + +## Create Tokens for Your Users + +The following operations need to be performed in the Hub Cluster. Please ensure that kubectl is correctly set to connect to the Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +By default, the validity period of a token is 1 hour. If you need a long-term token, you can specify the expiration time when generating the token. For example: + +```shell +kubectl create token karpor-admin --duration=1000h +``` + +By default, the maximum validity period of the token is 8760 hours (1 year). If you need to modify this maximum validity period, you can add `--service-account-max-token-expiration={MAX_EXPIRATION:h/m/s}` to the startup parameters of the karpor-server. + +**Note**: Creating a token requires kubectl version 1.25.0 or higher. + +## Start Using Karpor Safely + +Copy the token you just generated and paste it into the token input box on the Karpor dashboard, then click login. + +Start your Karpor journey in a secure environment! 
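Before handing a token to someone (or pasting it into the dashboard), it can be handy to confirm from the command line that it actually authenticates against the Hub Cluster. The following is a minimal sketch built from the commands already shown above plus standard `kubectl` flags; it assumes the `karpor-server` port-forward on `127.0.0.1:7443` is still running, and it only checks authentication — what the identity is allowed to do is still decided by the preset ClusterRoles.

```shell
# Use the Hub Cluster kubeconfig exported earlier to mint a short-lived token to test.
export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig
TOKEN=$(kubectl create token karpor-guest --duration=1h)

# Call the Hub Cluster with ONLY the bearer token (an empty kubeconfig via /dev/null
# keeps the admin client certificate out of the request). Discovery succeeds with a
# valid token and fails with an authentication error if the token is invalid or expired.
kubectl --kubeconfig=/dev/null \
        --server=https://127.0.0.1:7443 \
        --insecure-skip-tls-verify=true \
        --token="$TOKEN" \
        api-resources
```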
diff --git a/docs/karpor/3-user-guide/2-multi-cluster-management.md b/docs/karpor/3-user-guide/2-multi-cluster-management.md new file mode 100644 index 00000000..c41cfec5 --- /dev/null +++ b/docs/karpor/3-user-guide/2-multi-cluster-management.md @@ -0,0 +1,93 @@ +--- +title: Multi-Cluster Management +--- +Multi-cluster management is the entrance to register clusters into Karpor, enabling search and insight capabilities across a large number of clusters. + +## Register Cluster + +1. Click the Cluster Management Tab. +2. Click the Register Cluster button. + ![](/karpor/assets/cluster-mng/cluster-mng-empty.png) +3. Add the cluster name. The cluster name must be unique and CANNOT be altered once created. +4. Upload the cluster's KubeConfig file. One with read permission is sufficient. +5. Click the Verify and Submit button. + ![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) +6. Once verified, the cluster will be added under the Cluster Management page + ![](/karpor/assets/cluster-mng/cluster-mng-register-success.png) + +**Note**: Please ensure network connectivity between the server address (target cluster address) in the uploaded cluster certificate and Karpor. For example, if you have deployed Karpor in a local cluster and want to register that local cluster, you need to modify the server address in the cluster certificate to the internal cluster address `https://kubernetes.default.svc.cluster.local:443` to ensure that Karpor can directly access the target cluster. + +### Register EKS Cluster + +If you want to register an EKS cluster, you need to perform some additional operations on the KubeConfig: + +1. Export the KubeConfig for the EKS cluster. For example, you can obtain the KubeConfig for the specified cluster using the following AWS command: + +```shell +aws eks --region update-kubeconfig --name --kubeconfig= +``` + +2. Add the fields `env`, `interactiveMode`, and `provideClusterInfo` to the `users/exec` section of the exported KubeConfig file. You can refer to the following KubeConfig structure: + +```yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: CA + server: SERVER + name: CLUSTER +contexts: +- context: + cluster: CLUSTER + user: USER + name: CONTEXT +current-context: CONTEXT +kind: Config +preferences: {} +users: +- name: USER + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + args: + - --region + - ap-southeast-1 + - eks + - get-token + - --cluster-name + - mycluster3 + - --output + - json + command: aws + ### The following fields need to be added to the KubeConfig. + env: + - name: AWS_ACCESS_KEY_ID + value: + - name: AWS_SECRET_ACCESS_KEY + value: + - name: AWS_DEFAULT_REGION + value: + - name: AWS_DEFAULT_OUTPUT + value: json + interactiveMode: IfAvailable + provideClusterInfo: false +``` + +3. Use the modified KubeConfig in [Register Cluster](#register-cluster). + +## Edit Cluster + +The Edit button allows for modifications to the Display Name and Description, thus altering how the cluster's name and description appear on the Dashboard. +![](/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png) + +## Rotate Certificate + +When the KubeConfig expires, you can update the certificate by clicking Rotate Certificate. +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png) + +## Remove Cluster + +The delete button facilitates the removal of a registered cluster. 
+![](/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png) diff --git a/docs/karpor/3-user-guide/3-search.md b/docs/karpor/3-user-guide/3-search.md new file mode 100644 index 00000000..d51f9588 --- /dev/null +++ b/docs/karpor/3-user-guide/3-search.md @@ -0,0 +1,47 @@ +--- +title: How to Search +--- +Within this section, we will explore how to perform multi-cluster resource searches using Karpor, with this guide being done entirely through the Dashboard. + +We support three methods of search: + +- **Search by SQL**: Perform resource searches using SQL query language. +- **Search by DSL**: Conduct resource searches through `Karpor`'s Domain Specific Language (DSL). +- **Search by Natural Language**: Using natural language for resource search. + +## Search by SQL + +Karpor offers a nifty SQL query feature that allows you to search and filter all Kubernetes resources within managed clusters using familiar SQL syntax and provides targeted optimizations and enhancements for multi-cluster resource searches. + +SQL is one of the easily accessible skills for practitioners in the software engineering industry, theoretically making the learning curve quite low. As such, this search method is prepared for you! It is particularly well-suited for beginners to Karpor. + +Below are the steps to use Search by SQL: + +1. **Enter the Search page**: We designed the homepage as the entry point for search, so opening `Karpor`'s Web UI immediately presents you with the search page. + ![](/karpor/assets/search/search-home.png) +2. **Compose SQL query statements**: Write your query statement using SQL syntax, specifying the cluster name, resource type, conditions, and filters you wish to search for. Additionally, if you enter a keyword and press a space, the search box will pop up with a dropdown with auto-completion, suggesting possible keywords you can type next. + ![](/karpor/assets/search/search-auto-complete.png) +3. **Execute the query**: Click the 'search' button to execute the query and be sent to the search results page. Karpor will return a list of resources that match the SQL query. + ![](/karpor/assets/search/search-result.png) +4. **Advanced features**: Utilize our built-in advanced SQL syntax, such as sorting, full-text search, etc., to refine your search further. For details, please refer to: [Search Methodology Documentation](../5-references/3-search-methods.md). + +## Search by DSL + +Coming soon. 🚧 + +## Search by Natural Language + +Although the SQL search function we currently offer does not require additional learning as many engineers already have SQL knowledge, it is clear that the most intuitive and lowest learning threshold search method is to use the user's native language - natural language. + +Therefore, we offers search by natural language feature for Kubernetes resources in Karpor. + +Below are the steps to use Search by Natural Language: + +1. **Enter the Search page**: We designed the homepage as the entry point for search, so opening `Karpor`'s Web UI immediately presents you with the search page. Then we can choose `Search By Natural Language`. + ![](/karpor/assets/search/search-home-natural-language.png) +2. **Compose natural language query statements**:Write your query statement using natural language syntax, specifying the cluster name, resource type, conditions, and filters you wish to search for. + ![](/karpor/assets/search/search-by-natural-language.png) +3. **Execute the query**:Click the 'search' button to execute the query and be sent to the search results page. 
Karpor will return a list of resources that match the natural language query. + ![](/karpor/assets/search/search-by-natural-language-result.png) +4. **Search tips**:We provide tips for incomplete or haphazard natural language query. +5. **Second search**:Our natural language query is converted to SQL, which the user can modify to start the search again. \ No newline at end of file diff --git a/docs/karpor/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md b/docs/karpor/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md new file mode 100644 index 00000000..6632f8e9 --- /dev/null +++ b/docs/karpor/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md @@ -0,0 +1,27 @@ +--- +title: Inspecting Any Resource Group and Resource +--- +In this part, we will explain in detail through clear steps and examples how to use Karpor to inspect any resource group or resource. + +If you are not familiar with relevant concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +## Inspecting Specific Resources + +1. Search for the resource you are interested in: + ![](/karpor/assets/search/search-home.png) +2. On the search results page, all resources filtered by the criteria will be listed: + ![](/karpor/assets/search/search-result.png) +3. Click on any resource name to jump to that resource's insight page: + ![](/karpor/assets/insight/insight-home.png) + +## Inspecting Specific Resource Groups + +You may notice that in each search result entry, tags for `Cluster`, `Kind`, `Namespace`, etc., of the resource are listed. Please note that these tags are **hyperlinks**, which we refer to as "**anchor points**". These represent the links to a particular resource group or a resource. By clicking on these **anchor points**, you can quickly jump to the insight page of that resource group or resource. + +![](/karpor/assets/search/search-result.png) + +## Flexible Switching Between Resource Groups/Resources + +In fact, besides the tags in the mentioned search results, any resource/resource group names you see on any page can be re-directed to as **anchor points**, which serve like space-time wormholes, allowing you to traverse back and forth through any dimension until you find the resources you are searching for. Both search and anchor points are means to expedite the retrieval, which are key features of Karpor as a Kubernetes Explorer. + +![](/karpor/assets/insight/insight-breadcrumbs.png) diff --git a/docs/karpor/3-user-guide/4-insight/2-custom-resource-group.md b/docs/karpor/3-user-guide/4-insight/2-custom-resource-group.md new file mode 100644 index 00000000..2f22fb79 --- /dev/null +++ b/docs/karpor/3-user-guide/4-insight/2-custom-resource-group.md @@ -0,0 +1,92 @@ +--- +title: Custom Resource Group +--- +## Creating Custom Resource Group + +This section will focus on how to create custom resource group within Karpor. Through custom resource group, you can flexibly manage and organize resources in Karpor according to your own needs and logical concepts. We will guide you step by step to create and define custom resource group and show you how to use these groups for resource insight and management. + +If you're not familiar with **Resource Group** and **Resource Group Rule** related concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. 
+ +**Let's assume** that within your organization or company, there is a concept of `application unit` that represent **all resources of an application in a certain environment**. + +We mark the **name and environment of the application in the label**. For example, the following is the `application unit` of `mock-apple` in the `production environment`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: mock-apple + name: mock-apple +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + template: + metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + fruit: apple + spec: + containers: + - image: nginx:latest + name: mock-container + dnsPolicy: ClusterFirst + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + name: mock-service-apple-prod + namespace: mock-apple +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + type: ClusterIP +``` + +Now, we will create a custom `resource group rule` called the `application unit` by following the steps below. It will classify all resources in the cluster according to the rules specified by the user and list all `resource groups` that comply with the rules. + +1. Click on the Insight tab to enter the insight homepage. +2. At the bottom of the page, you will see a default resource group rule `namespace`, which is a single rule classified by a namespace. + ![](/karpor/assets/insight/insight-homepage.png) +3. Click on the create button + of the resource group and fill in the **basic information and classification rules** of the `application unit` in the pop-up window. + ![](/karpor/assets/insight/insight-create-app-resource-group-rule.png) +4. Click on the Submit button, then click on the newly appearing application unit tab to list all application units. + ![](/karpor/assets/insight/insight-list-app-resource-groups.png) +5. You can enter keywords in the search box to quickly find the `application unit` of `mock-apple` in `production`. + ![](/karpor/assets/insight/insight-search-app-resource-group.png) +6. You can click the View button on a resource group card to jump to the corresponding `resource group insight page` and view aggregated information such as all resources, topology relationships, compliance reports, etc. of a certain application unit. +7. If necessary, you can also use the same steps to create an `environment resource group`. + ![](/karpor/assets/insight/insight-create-env-resource-group-rule.png) + ![](/karpor/assets/insight/insight-list-env-resource-groups.png) + +## Edit Custom Resource Group + +You can click the button on the right side of the custom resource group tab to modify basic information and classification rules in the pop-up window. + +![](/karpor/assets/insight/insight-edit-env-resource-group.png) + +## Delete Custom Resource Group + +You can click the button on the right side of the custom resource group tab, then click on the Delete to delete current resource group rule in the pop-up window. 
+ +![](/karpor/assets/insight/insight-delete-env-resource-group.png) diff --git a/docs/karpor/3-user-guide/4-insight/3-summary.md b/docs/karpor/3-user-guide/4-insight/3-summary.md new file mode 100644 index 00000000..1f3970f7 --- /dev/null +++ b/docs/karpor/3-user-guide/4-insight/3-summary.md @@ -0,0 +1,22 @@ +--- +title: Summary +--- +In this section, we will learn about the `summary card` on the Karpor insight page, which are used to quickly view and understand key metrics for the current resource group or resource. + +Under different resource groups, the content displayed by the `summary card` may also vary. + +If you are on: + +1. **Resource Group Insight Page**: + 1. **Cluster Insight Page**, the summary card shows the **Node, Pod numbers, CPU, memory capacity, and Kubernetes version of the cluster**. + ![](/karpor/assets/insight/insight-summary-cluster.png) + 2. **Resource Kind Insight Page**, the summary card shows the **affiliated cluster, GVK (Group Version Kind) information, and the number of that type of resource under the current cluster**. + ![](/karpor/assets/insight/insight-summary-kind.png) + 3. **Namespace Insight Page**, the summary card shows the **affiliated cluster, namespace, and the most abundant resource types under the current namespace.** + ![](/karpor/assets/insight/insight-summary-namespace.png) + 4. **Custom Resource Group Insight Page**, the summary card shows the **key-value of each rule, and several resource statistics under the current resource group.** + ![](/karpor/assets/insight/insight-summary-custom-resource-group.png) +2. **Resource Insight Page**, the summary card shows the **current resource's name, GVK information, affiliated cluster, and namespace.** + ![](/karpor/assets/insight/insight-summary-resource.png) + +⚠️ **Attention**: No matter which resource group insight page you are on, the summary card will always display a health score, calculated based on the risk compliance status of the subject. diff --git a/docs/karpor/3-user-guide/4-insight/4-compliance-report.md b/docs/karpor/3-user-guide/4-insight/4-compliance-report.md new file mode 100644 index 00000000..1c714804 --- /dev/null +++ b/docs/karpor/3-user-guide/4-insight/4-compliance-report.md @@ -0,0 +1,16 @@ +--- +title: Compliance Report +--- +This section will introduce the compliance scan feature, primarily used to detect and assess whether all resources in the current resource or resource group comply with specific compliance standards and security policies. In this section, you will understand how to effectively utilize the compliance scan feature to ensure the security and compliance of the cluster and resources. + +If you're not familiar with **Compliance Report** or **Risk** related concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +1. Follow the guidance on [Inspecting Any Resource Group and Resource](#inspecting-any-resource-group-and-resource) and resource to navigate to the insights page of a particular resource group/resource. +2. You can see the **Compliance Report** card of the resource. + ![](/karpor/assets/insight/insight-home.png) +3. This card displays the **Risk** identified during the scan of the current resource or all the resources under the resource group, categorized by risk level. Under each risk level tag, risks are sorted from highest to lowest occurrence. Each risk entry shows the title, description, number of occurrences, and the scanning tool that discovered the issue. +4. 
Clicking on a specific risk will display a popup with the details of the risk. + ![](/karpor/assets/insight/insight-single-issue.png) +5. Click on View All Risks, and a drawer will pop out listing all the risks. Here, you can search, categorize, paginate, etc + ![](/karpor/assets/insight/insight-all-issues.png) +6. Once you have resolved a risk following its indications, you can click the [Rescan] button, which will trigger a comprehensive compliance scan of all resources under the resource group. The Dashboard will display the new results once the scan is completed. diff --git a/docs/karpor/3-user-guide/4-insight/5-topology.md b/docs/karpor/3-user-guide/4-insight/5-topology.md new file mode 100644 index 00000000..73a4ef79 --- /dev/null +++ b/docs/karpor/3-user-guide/4-insight/5-topology.md @@ -0,0 +1,19 @@ +--- +title: Topology +--- +## Topology + +In this section, we will explore the topology feature in Karpor. The topology view will help you more intuitively understand the relationships and dependencies among various resources in your cluster. Here's how to use the topology view. + +1. Follow the guidance on [Inspecting Any Resource Group and Resource](#inspecting-any-resource-group-and-resource) to navigate to the insights page of a particular resource group/resource. +2. At the bottom of the page, you can see the resource topology map. + ![](/karpor/assets/insight/insight-topology.png) +3. Depending on the current page: + 1. Resource Insights Page: + 1. The map will display relevant upstream and downstream resources related to the current resource. For example, if the current resource is a Deployment, the topology map will show the ReplicaSet under the Deployment and the Pods under the ReplicaSet. + ![](/karpor/assets/insight/insight-topology-example.png) + 2. Clicking on a node in the resource topology map is equivalent to clicking on an anchor of a specific resource, which will directly navigate to the insights page of that resource. + 2. Resource Group Insights Page: + 1. The map will intuitively show the quantity and relationship of all types of resources under the current resource group. + 2. Clicking on a node in the resource topology map is equivalent to clicking on a resource type, and the list below will refresh with all the resources under a specific type within the current resource group. + ![](/karpor/assets/insight/insight-linkage.png) diff --git a/docs/karpor/3-user-guide/4-insight/_category_.json b/docs/karpor/3-user-guide/4-insight/_category_.json new file mode 100644 index 00000000..c39e5397 --- /dev/null +++ b/docs/karpor/3-user-guide/4-insight/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "How to Insight" +} diff --git a/docs/karpor/3-user-guide/4-insight/index.md b/docs/karpor/3-user-guide/4-insight/index.md new file mode 100644 index 00000000..0bdb110d --- /dev/null +++ b/docs/karpor/3-user-guide/4-insight/index.md @@ -0,0 +1,6 @@ +--- +title: How to Insight +--- +In this section, we will introduce how to gain comprehensive insights into the resources within a cluster using Karpor. You can access the Insight page in various ways and easily toggle between insight pages for different scopes (such as Cluster, Kind, Namespace or individual Resource). If there are domain-specific logical scopes within your current organization, you can even customize resource groups (such as Application, Environment, etc.) by setting resource group rules. We also provide functionality to gain insights into these custom resource groups. 
+ +This guide will be entirely operated from the Karpor Dashboard. diff --git a/docs/karpor/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md b/docs/karpor/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md new file mode 100644 index 00000000..cb8cb78f --- /dev/null +++ b/docs/karpor/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md @@ -0,0 +1,42 @@ +--- +title: One-Pass with Proxy +--- +## Challenges and Demands + +### The Scale of Multicluster at a Grand Scale + +In June 2014, Kubernetes, born from Google's internal Borg project, made a striking debut. Endorsed by tech giants and aided by a thriving open-source community, it gradually became the de facto standard in the container orchestration field. As companies began to deploy Kubernetes in production environments, a single Kubernetes cluster could no longer meet the increasingly complex demands internally. It's common for the number of nodes in a single cluster to exceed the community-recommended limit (5,000), making the expansion into a multicluster configuration a natural choice. + +### The Basic Needs from Multicluster Accessors + +With the thriving development of multiclusters, various platforms may need to access resources across different clusters, requiring access to each cluster's KubeConfig. + +As the number of users and clusters increases, cluster administrators face significant time costs: If there are `M` clusters and `N` users, the time complexity for managing KubeConfig becomes `O(M*N)`. Moreover, users need to switch between different KubeConfigs when accessing different clusters, and the corresponding permissions for KubeConfigs vary across clusters, undoubtedly adding to the complexity of use. + +![Direct Connection: Users need to maintain multiple KubeConfigs](assets/1-one-pass-with-proxy/image-20240326163622363.png) + +Under these circumstances, is there a method to conveniently access resources in different clusters without maintaining a large number of KubeConfigs and managing various users' permissions across clusters? Moreover, this method should ideally be cloud-native, accessible through kubectl and Kubernetes' official client, to reduce the cost of transitioning to this method. The emergence of `Karpor` is to solve these problems. + +## The Idea of A "One-Pass Access" + +We developed `Karpor`, an open-source project. While serving as a Kubernetes Explorer with unique advantages in searching and insight into cluster resources, its foundational multicluster management component, featuring cluster certificate issuance and multicluster request proxying, makes it highly suitable as a unified access point for platforms to multiple clusters. This component supports forwarding user requests to designated clusters in a cloud-native manner, allowing users to maintain a single set of KubeConfigs to access different clusters, making multicluster access as simple as accessing a single cluster. So, how does it work? Below, we introduce `Karpor`'s architecture and functionality. + +![Using Multi-cluster Gateway: Users only need to maintain a single set of KubeConfigs](assets/1-one-pass-with-proxy/image-20240326164141400.png) + +### Multi-cluster Request Routing and Proxy + +`Karpor` includes an application layer gateway capable of forwarding any Kubernetes-style request to a specified Kubernetes cluster. `Karpor` is also developed based on the Kubernetes framework as a kube-apiserver, which can operate independently or as an extension to an existing kube-apiserver. 
`Karpor` supports handling two types of extended resources: `Cluster` and `Cluster/Proxy`, the former for storing cluster information and the latter for forwarding user requests to a specific cluster. Users can access through the Kubernetes official CLI (`kubectl`) or SDK (`client-go`, `client-java`, etc.). + +`Karpor` proxies all access to `Cluster/Proxy` subresources to the target cluster. For example, to retrieve Pod information from the `Cluster1` cluster, users need to send the `GET /apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods` request to `Karpor`. `Karpor` will generate a KubeConfig from the `Cluster/Cluster1` resource for accessing the cluster and proxy the `/api/v1/pods` request to the `Cluster1` cluster. + +![Accessing any managed cluster with kubectl & karpor certificate](assets/1-one-pass-with-proxy/image-20240326165247891.png) + +### Supporting All Kubernetes Native Requests + +`Karpor` supports forwarding all kube-apiserver requests. Specifically, `Karpor` is an application layer gateway that proxies HTTP requests through the HTTP connect protocol. In addition to supporting `get`, `create`, `update`, and `delete` operations on resources, it also supports `watch`, `log`, `exec`, `attach`, etc. (Since the SPDY protocol used for `exec`, and `attach` does not support http2, `Karpor` will disable http2 when forwarding these requests, switching to http1.1 and supporting hijacker processing). + +![](assets/1-one-pass-with-proxy/image-20240326165632158.png) + +## Summary + +As can be gleaned from the text above, utilizing `Karpor`'s multi-cluster management component enables the issuance of a "multi-cluster pass" with controllable permissions for users. Users no longer need to concern themselves with issues such as frequent cluster certificate switching and onboarding of new clusters. With this "one-pass access", the cost of accessing multiple clusters is reduced, fulfilling the most fundamental needs of most users on multi-cluster platforms. 
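To make the request flow above concrete, here is a minimal sketch that replays the documented example with `kubectl get --raw`. It assumes the Hub Cluster kubeconfig issued by Karpor (the path follows the user guide) and a managed cluster registered as `cluster1`; the proxy path is copied from the example in the routing section above and may need to be adjusted for your Karpor version.

```shell
# List Pods in the managed cluster "cluster1" through Karpor's multi-cluster proxy.
# karpor-server resolves the Cluster/Proxy subresource and forwards /api/v1/pods to cluster1.
export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig
kubectl get --raw "/apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods"
```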
diff --git a/docs/karpor/3-user-guide/5-best-production-practices/_category_.json b/docs/karpor/3-user-guide/5-best-production-practices/_category_.json new file mode 100644 index 00000000..82dd90e3 --- /dev/null +++ b/docs/karpor/3-user-guide/5-best-production-practices/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Best Production Practices" +} diff --git a/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png new file mode 100644 index 00000000..ab8051fe Binary files /dev/null and b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png differ diff --git a/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png new file mode 100644 index 00000000..de950079 Binary files /dev/null and b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png differ diff --git a/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png new file mode 100644 index 00000000..27fffb47 Binary files /dev/null and b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png differ diff --git a/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png new file mode 100644 index 00000000..99053c68 Binary files /dev/null and b/docs/karpor/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png differ diff --git a/docs/karpor/3-user-guide/_category_.json b/docs/karpor/3-user-guide/_category_.json new file mode 100644 index 00000000..8f01ba26 --- /dev/null +++ b/docs/karpor/3-user-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guide" +} diff --git a/docs/karpor/4-developer-guide/1-contribution-guide/1-non-code-contribute.md b/docs/karpor/4-developer-guide/1-contribution-guide/1-non-code-contribute.md new file mode 100644 index 00000000..721dbf92 --- /dev/null +++ b/docs/karpor/4-developer-guide/1-contribution-guide/1-non-code-contribute.md @@ -0,0 +1,40 @@ +--- +title: Non-code Contribution Guide +--- +You can contribute in any of the following ways that interest you. + +## Contributing Use Cases and Demos + +* If you are using Karpor, the simplest way to contribute is to [express gratitude to the community](https://github.com/KusionStack/karpor/issues/343). + +## Reporting Bugs + +Before submitting a new issue, please make sure that no one has already reported the problem. + +Check the [Issue list](https://github.com/KusionStack/karpor/issues) for any similar issues. + +[Report bugs](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug-report.yaml) by submitting a Bug report, ensuring you provide as much information as possible to help reproduce the Bug. + +Follow the issue template and add additional information to help us replicate the issue. 
+ +## Security Issues + +If you believe you have discovered a security vulnerability, please read our [security policy](https://github.com/KusionStack/karpor/blob/main/SECURITY.md) for more detailed information. + +## Suggesting Enhancements + +If you have ideas to improve Karpor, please submit a [feature request](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Ffeature&projects=&template=enhancement.yaml). + +## Answering Questions + +If you have a question and cannot find the answer in the [documentation](https://www.kusionstack.io/karpor/), the next step is to ask on [GitHub Discussions](https://github.com/KusionStack/karpor/discussions). + +Helping these users is important to us, and we would love to have your help. You can contribute by answering [their questions](https://github.com/KusionStack/karpor/discussions) to help other Karpor users. + +## Contributing Documentation + +Contributing to the documentation requires some knowledge on how to submit a pull request to Github, which I think won't be difficult if you follow the guide. + +* [kusionstack.io Developer's Guide](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +For more ways to contribute, please look at the [Open Source Guide](https://opensource.guide/how-to-contribute/). diff --git a/docs/karpor/4-developer-guide/1-contribution-guide/2-code-contribute.md b/docs/karpor/4-developer-guide/1-contribution-guide/2-code-contribute.md new file mode 100644 index 00000000..35060c9c --- /dev/null +++ b/docs/karpor/4-developer-guide/1-contribution-guide/2-code-contribute.md @@ -0,0 +1,174 @@ +--- +title: Code Contribution Guide +--- +In this code contribution guide, you will learn about the following: + +- [How to run Karpor locally](#running-karpor-locally) +- [How to create a pull request](#creating-a-pull-request) +- [Code review guidelines](#code-review) +- [Formatting guidelines for pull requests](#formatting-guidelines) +- [Updating Documentation and Website](#updating-documentation-and-website) + +## Running Karpor Locally + +This guide will help you get started with Karpor development. + +### Prerequisites + +* Golang version 1.22+ + +
+ Installing Golang + +1. Install go1.22+ from the [official website](https://go.dev/dl/). Extract the binary files and place them at a location, assuming it is located under the home directory `~/go/`, here is an example command, you should choose the correct binary file for your system. + +``` +wget https://go.dev/dl/go1.22.5.linux-amd64.tar.gz +tar xzf go1.22.5.linux-amd64.tar.gz +``` + +If you would like to maintain multiple versions of golang in your local development environment, you can download the package and extract it to a location, like `~/go/go1.22.1`, and then alter the path in the command below accordingly. + +1. Set environment variables for Golang + +``` +export PATH=~/go/bin/:$PATH +export GOROOT=~/go/ +export GOPATH=~/gopath/ +``` + +If the `gopath` folder does not exist, create it with `mkdir ~/gopath`. These commands will add the go binary folder to the `PATH` environment variable (making it the primary choice for go) and set the `GOROOT` environment to this go folder. Please add these lines to your `~/.bashrc` or `~/.zshrc` file, so you won't need to set these environment variables every time you open a new terminal. + +1. (Optional) Some regions, such as China, may have slow connection to the default go registry; you can configure GOPROXY to speed up the download process. + +``` +go env -w GOPROXY=https://goproxy.cn,direct +``` + +
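After setting these variables, a quick check (a minimal sketch based on the layout assumed above) confirms that the toolchain and proxy settings are picked up by your shell:

```shell
# Confirm the Go toolchain and environment resolve to the expected locations.
go version
go env GOROOT GOPATH GOPROXY
```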
+ +* Kubernetes version v1.20+ configured with `~/.kube/config`. +* golangci-lint version v1.52.2+. It will be installed automatically when you run `make lint`; if the installation fails, you can install it manually. + +
+ Manually installing golangci-lint + +You can install it manually following the [guide](https://golangci-lint.run/welcome/install), or use the command: + +``` +cd ~/go/ && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.52.2 +``` + +
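Whichever installation route you choose, you can confirm the binary is on your `PATH` and matches the version required above (a minimal check):

```shell
# Print the installed golangci-lint release.
golangci-lint version
```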
+ +### Building + +- Clone this project + +```shell +git clone git@github.com:KusionStack/karpor.git +``` + +- Build locally + +Executing `make build-all` will build the executables for all platforms; if you only want to build for a specific platform, execute `make build-${PlatformName}`, e.g., `make build-darwin`. To see all available commands, execute `make help`. + +### Testing + +It's essential to write tests to maintain code quality, you can run all unit tests by executing the following command in the project root directory: + +```shell +make test +``` + +If you need to generate extra coverage report files, execute: + +```shell +make cover +``` + +Then you can view the content of the coverage report in a browser by running: + +```shell +make cover-html +``` + +## Creating a Pull Request + +We are thrilled that you are considering contributing to the Karpor project! + +This document will guide you through the process of [creating a pull request](./index.md#contribute-a-pull-request). + +### Before you begin + +We know you are excited to create your first pull request. Before we get started, make sure your code follows the relevant [code conventions](../2-conventions/2-code-conventions.md). + +### Your First Pull Request + +Before submitting your PR, run the following commands to ensure they all succeed: + +``` +make test +make lint +``` + +If this is your first time contributing to an open-source project on GitHub, please make sure to read the instructions on [creating a pull request](https://help.github.com/en/articles/creating-a-pull-request). + +To increase the chances of your pull request being accepted, please ensure your pull request follows these guidelines: + +- The title and description match the implementation. +- The commits in the pull request follow the [formatting guidelines](#Formatting-guidelines). +- The pull request closes a related issue. +- The pull request includes necessary tests to verify the expected behavior. +- If your pull request has conflicts, please rebase your branch onto the main branch. + +If the pull request fixes a bug: + +- The pull request description must contain `Closes #` or `Fixes #`. +- To prevent regressions, the pull request should include tests that replicate the bug being fixed. + +## Code Review + +Once you have created a pull request, the next step is to have others review your changes. Review is a learning opportunity for both reviewers and the author of the pull request. + +If you believe a specific person should review your pull request, you can tag them in the description or a comment. +Tag a user by typing an `@` symbol followed by their username. + +We recommend that you read [How to do a code review](https://google.github.io/eng-practices/review/reviewer/) to learn more about code reviews. + +## Formatting Guidelines + +A well-crafted pull request can minimize the time to get your changes accepted. These guidelines will help you write well-formulated commit messages and descriptions for your pull requests. + +### Commit Message Format + +More see: [Commit Conventions](../2-conventions/4-commit-conventions.md) + +### Pull Request Title + +When accepting pull requests, the Karpor team merges all commits into one. + +The pull request title becomes the subject line of the merged commit message. + +We still encourage contributors to write informative commit messages, as they will be part of the Git commit body. + +We use the pull request titles when generating change logs for releases. 
Hence, we strive to make the titles as informative as possible. + +Make sure your pull request title uses the same format as the commit message subject line. If the format is not followed, we will add a `title-needs-formatting` label on the pull request. + +### Passing All CI Checks + +Before merging, all testing CIs should pass: + +- Coverage should not drop. Currently, the pull request coverage should be at least 60%. +- Karpor uses a **CLA** for the contributor agreement. It requires you to sign for every commit before merging the pull request. + +## Updating Documentation and Website + +If your pull request has been merged, and it is a new feature or enhancement, you need to update the documentation and send a pull request to the [kusionstack.io](https://github.com/KusionStack/kusionstack.io) repository. + +Learn how to write documentation through the following guide: + +- [kusionstack.io Developer Guide](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +Awesome, you've completed the lifecycle of code contribution! diff --git a/docs/karpor/4-developer-guide/1-contribution-guide/3-roles.md b/docs/karpor/4-developer-guide/1-contribution-guide/3-roles.md new file mode 100644 index 00000000..e9193204 --- /dev/null +++ b/docs/karpor/4-developer-guide/1-contribution-guide/3-roles.md @@ -0,0 +1,41 @@ +--- +title: Roles +--- +Thank you for your interest and support for karpor! + +This document outlines the roles and responsibilities of contributors in the project, as well as the process for becoming a Contributor and losing Maintainer status. We hope that this document will help every contributor understand the growth path and make a greater contribution to the project's development. + +## Contributor Roles and Responsibilities + +we have two main contributor roles: Contributor and Maintainer. + +Here is a brief introduction to these two roles: + +1. Contributor: A contributor to the project who can contribute code, documentation, testing, and other resources. Contributors provide valuable resources to the project, helping it to continuously improve and develop. +2. Maintainer: A maintainer of the project who is responsible for the day-to-day maintenance of the project, including reviewing and merging PRs, handling issues, and releasing versions. Maintainers are key members of the project and have a significant impact on the project's development direction and decision-making. + +## How to become a Maintainer + +We welcome every contributor to contribute to the project's development and encourage contributors to upgrade to the role of Maintainer. + +The following are the conditions for upgrading from Contributor to Maintainer: + +1. Continuous contribution: Contributors need to contribute to the project continuously for a period of time (e.g., 3 months). This demonstrates the contributor's attention and enthusiasm for the project. +2. Quality assurance: The code or documentation submitted by contributors needs to maintain a high level of quality, meet the project's specifications, and have a positive impact on the project. +3. Active participation: Contributors need to actively participate in project discussions and decision-making, providing constructive opinions and suggestions for the project's development. +4. Team collaboration: Contributors need to have good teamwork skills, communicate friendly with other contributors and maintainers, and work together to solve problems. +5. 
Responsibility: Contributors need to have a certain sense of responsibility and be willing to undertake some of the project maintenance work, including reviewing PRs and handling issues. When a contributor meets the above conditions, existing maintainers will evaluate them. + +If they meet the requirements of Maintainer, they will be invited to become a new Maintainer. + +## Losing Maintainers status + +Maintainer have important responsibilities in the project, and we hope that every Maintainer can maintain their attention and enthusiasm for the project. + +However, we also understand that everyone's time and energy are limited, so when Maintainers cannot continue to fulfill their responsibilities, they will be downgraded to the role of Contributor: + +1. Long-term inactivity: If a Maintainer has not participated in project maintenance work, including reviewing PRs and handling issues, for a period of time (e.g., 3 months), they will be considered inactive. +2. Quality issues: If a Maintainer's work in the project has serious quality issues that affect the project's development, they will be considered not meeting the requirements of Maintainer. +3. Team collaboration issues: If a Maintainer has serious communication or teamwork issues with other contributors and maintainers, such as disrespecting others' opinions, frequent conflicts, or refusing to collaborate, which affects the project's normal operation and atmosphere, they will be considered not meeting the requirements of Maintainer. +4. Violation of rules: If a Maintainer violates the project's rules or code of conduct, including but not limited to leaking sensitive information or abusing privileges, they will be considered not meeting the requirements of Maintainer. +5. Voluntary application: If a Maintainer cannot continue to fulfill their responsibilities due to personal reasons, they can voluntarily apply to be downgraded to the role of Contributor. diff --git a/docs/karpor/4-developer-guide/1-contribution-guide/_category_.json b/docs/karpor/4-developer-guide/1-contribution-guide/_category_.json new file mode 100644 index 00000000..09eab23b --- /dev/null +++ b/docs/karpor/4-developer-guide/1-contribution-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Contribution Guide" +} diff --git a/docs/karpor/4-developer-guide/1-contribution-guide/index.md b/docs/karpor/4-developer-guide/1-contribution-guide/index.md new file mode 100644 index 00000000..a23e2100 --- /dev/null +++ b/docs/karpor/4-developer-guide/1-contribution-guide/index.md @@ -0,0 +1,118 @@ +# Contributing Guide + +Contributing Guide that introduces how to participate and contribute to the community. + +To help us create a safe and positive community experience for all, we require all participants adhere to the CNCF Community [Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). + +## Before contributing + +### Find a Contribution Point + +You can contribute to Karpor in several ways including code and non-code contributions, +we appreciate every effort you contribute to the community. + +Here are some examples: + +* Contribute to the codebase and docs. +* Report and triage issues. +* Organize meetups and user groups in your local area. +* Help others by answering questions about Karpor. 
+ +And: + +- If you don’t know what issues start, we have prepared a [Community tasks | 新手任务清单 🎖︎](https://github.com/KusionStack/karpor/issues/463), or you can filter [help wanted](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) or [good first issue](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue++label%3A%22good+first+issue%22) label in issue tracker. you can choose the issue you like. +- If you have any questions, please [Submit the Issue](https://github.com/KusionStack/karpor/issues/new/choose) or [Post on the discussions](https://github.com/KusionStack/karpor/discussions/new/choose), we will answer as soon as possible. + +### How to Contribute Non-code + +We regard non-coding contribution as equally important with code contribution for the community's very existence and its future growth. + +- Refer to [Non-code Contribution Guide](./non-code-contribute) to know how you could help. + +### How to Contribute Code + +Unsure where to begin contributing to Karpor codebase? Start by browsing issues labeled `good first issue` or `help wanted`. + +- [Good first issue](https://github.com/KusionStack/karpor/labels/good%20first%20issue) issues are generally straightforward to complete. +- [Help wanted](https://github.com/KusionStack/karpor/labels/help%20wanted) issues are problems we would like the community to help us with regardless of complexity. +- Refer to [Code Contribution Guide](./code-contribute) for more details. + +Learn [Code Conventions](../conventions/code-conventions) and [Test Conventions](../conventions/test-conventions) and understand what to pay attention to when writing code. + +And learn the [Release Process And Cadence](../conventions/release-process) to know when your code changes will be released. + +## Contribute a Pull Request + +After opening or claiming an issue, you could contribute codes or non-codes to karpor by a pull request. Here are the steps you should follow: + +### Fork Repository + +Karpor adopts trunk-based development, i.e., the code used for release is maintained on the main branch. + +Thus, to develop karpor, you have to fork one project in [karpor](https://github.com/KusionStack/karpor) repository to your workspace, and then check out a new branch to develop coding. + +### Develop Code/Non-Code + +Now you can start writing to solve the issue. To maintain the quality of karpor, after submitting the PR, some necessary checks will be triggered. + +After the development is completed, commit and push to your forked repository. Since the PR Title will be used as a merging commit message, we ask your PR Title to meet the [Commit Conventions](../2-conventions/4-commit-conventions.md). + +Here are some simple explanations: + +PR Title should be organized following this structure: + +``` +[optional scope]: + +[optional body] +``` + +The required type helps better capture the area of the commit, based on the [Angular guidelines](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines). + +We use lowercase for `` to avoid spending time on case-sensitive issues. 
`` can be one of the following: + +``` +feat: A new feature +fix: A bug fix +docs: Documentation only changes +build: Changes that affect the build system or external dependencies +style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) +refactor: A code change that neither fixes a bug nor adds a feature +perf: A code change that improves performance +test: Adding missing tests or correcting existing tests +chore: Changes to the build process or auxiliary tools and libraries such as documentation generation +``` + +### Open a Pull Request + +[Open a pull request](https://github.com/KusionStack/karpor/pulls) from the develop branch of your forked repository to the main branch of karpor. You should clearly describe what you do in the PR, and link it to an issue. Besides, the PR title should also follow the commit conventions described above, and must be 5-256 characters in length, prefix `WIP` and `[WIP]` are not allowed. + +### Sign CLA + +If it was your first pull request, you need to sign our [CLA(Contributor License Agreement)](https://github.com/KusionStack/.github/blob/main/CLA.md). The only thing you need to do is to post a pull request comment same as the below format: + +`I have read the CLA Document and I hereby sign the CLA` + +If your CLA signature failed, you may find the solutions below: + +* The comment must be in the same format as above, with no extra spaces, line breaks, etc. +* The git committer must be the same one who created the Karpor PR + +### PR Checks + +To keep the reliability of the karpor project, the following check will get triggered automatically: + +* Unit Test +* Golang Lint +* Commit Lint +* PR Title Lint +* License Lint +* Markdown Link Lint + +Please make sure your PR passes these checks. + +## Become a Community Member + +If you're interested to become a community member or learn more about the governance, please check the [Roles](./3-roles.md) for details. + +Enjoy coding and collaboration in Karpor world! diff --git a/docs/karpor/4-developer-guide/2-conventions/1-release-process.md b/docs/karpor/4-developer-guide/2-conventions/1-release-process.md new file mode 100644 index 00000000..6dda486a --- /dev/null +++ b/docs/karpor/4-developer-guide/2-conventions/1-release-process.md @@ -0,0 +1,49 @@ +--- +title: Release Process And Cadence +--- +## Release Planning + +We will establish and continuously follow up on the release plan through [GitHub Milestones](https://github.com/KusionStack/karpor/milestones). Each release milestone will include two types of tasks: + +- Tasks Maintainers commit to complete. Maintainers will decide on the features they are committed to implementing before the next release based on their available time and effort. Usually, tasks are finalized after offline discussions and then added to the milestone. These tasks will be assigned to the Maintainer who plans to implement or test them. +- Additional items contributed by community contributors, typically non-urgent features or optimizations. Maintainers do not commit to completing these issues within the release cycle but will commit to reviewing submissions from the community. + +The milestones will clearly describe the most important features and their expected completion dates. This will clearly inform end-users about the timing and contents of the next release. + +In addition to the next milestone, we will also maintain drafts of future release milestones. 
+ +## Release Standards + +- All **official releases** should be tagged on the `main` branch, with optional pre-release version suffixes such as: `alpha`, `beta`, `rc`, for example, a regular official release version might be `v1.2.3`, `v1.2.3-alpha.0`. For instance, if we want to perform some validations before releasing the official version `v1.2.3`, we could first release a pre-release version like `v1.2.3-alpha.0`, followed by `v1.2.3` after the validation is complete. +- Maintainers commit to completing certain features and enhancements, tracking progress through [GitHub Milestones](https://github.com/KusionStack/karpor/milestones). +- We will do our best to avoid release delays; thus, if we cannot complete a feature on time, it will be moved to the next release. +- A new version will be released every **1 month**. + +## Release Standard Procedure + +Maintainers are responsible for driving the release process and following standard operating procedures to ensure the quality of the release. + +1. Tag the git commit designated for release and push it upstream; the tag needs to comply with [Semantic Versioning](#semantic-versioning). +2. Ensure that the triggered Github Actions pipeline is executed successfully. Once successful, it will automatically generate a new Github Release, which includes the Changelog calculated from commit messages, as well as artifacts such as images and tar.gz files. +3. Write clear release notes based on the **Github Release**, including: + - User-friendly release highlights. + - Deprecated and incompatible changes. + - Brief instructions on how to install and upgrade. + +## Gate Testing + +Before creating the release branch, we will have a **1-week** code freeze period. During this period, we will refrain from merging any feature PRs and will only fix bugs. + +Maintainers will test and fix these last-minute issues before each release. + +## Semantic Versioning + +`Karpor` adopts [Semantic Versioning](https://semver.org/) for its version numbers. + +The version format: `MAJOR.MINOR.PATCH`, for example, `v1.2.3`. The version number **incrementing rules** are as follows: + +- MAJOR version when you make incompatible API changes. +- MINOR version when you add functionality in a backwards-compatible manner. +- PATCH version when you make backwards-compatible bug fixes. + +**Pre-release version numbers and build metadata** can be added to the `MAJOR.MINOR.PATCH` as an extension, like `v1.2.3-alpha.0`, `v1.2.3-beta.1`, `v1.2.3-rc.2`, where `-alpha.0`, `-beta.1`, `-rc.2` are pre-release versions. diff --git a/docs/karpor/4-developer-guide/2-conventions/2-code-conventions.md b/docs/karpor/4-developer-guide/2-conventions/2-code-conventions.md new file mode 100644 index 00000000..0fa03be0 --- /dev/null +++ b/docs/karpor/4-developer-guide/2-conventions/2-code-conventions.md @@ -0,0 +1,94 @@ +--- +title: Code Conventions +--- +In this section, you will find the code conventions for all kinds of code Karpor project related. It's not necessary to learn all of them at once, but make sure you have read corresponding parts before you start to code. 
+ +- [Go Code Conventions](#go-code-conventions) +- [Bash or Script Conventions](#bash-or-script-conventions) +- [Directory and File Conventions](#directory-and-file-conventions) +- [Logging Conventions](#logging-conventions) + +## Go Code Conventions + +- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +- [Effective Go](https://go.dev/doc/effective_go) +- Know and avoid [Go landmines](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) +- Comment your code. + + - [Go's commenting conventions](https://go.dev/blog/godoc) + - If reviewers ask questions about why the code is the way it is, that's a + sign that comments might be helpful. +- Command-line flags should use dashes, not underscores +- API + + - According to RFC3986, URLs are "case sensitive". Karpor uses `kebab-case` for API URLs. + - e.g.: `POST /rest-api/v1/resource-group-rule` +- Naming + + - Please consider package name when selecting an interface name, and avoid + redundancy. + + - e.g.: `storage.Interface` is better than `storage.StorageInterface`. + - Do not use uppercase characters, underscores, or dashes in package + names. + - Please consider parent directory name when choosing a package name. + + - so pkg/manager/cluster/foo.go should say `package cluster` + not `package clustermanager`. + - Unless there's a good reason, the `package foo` line should match + the name of the directory in which the .go file exists. + - Importers can use a different name if they need to disambiguate. + - Locks should be called `lock` and should never be embedded (always `lock sync.Mutex`). When multiple locks are present, give each lock a distinct name + following Go conventions - `stateLock`, `mapLock` etc. + +## Bash or Script Conventions + +- [https://google.github.io/styleguide/shell.xml](https://google.github.io/styleguide/shell.xml) +- Ensure that build, release, test, and cluster-management scripts run on + macOS + +## Directory and File Conventions + +- Avoid package sprawl. Find an appropriate subdirectory for new packages. + + - Libraries with no more appropriate home belong in new package + subdirectories of pkg/util +- Avoid general utility packages. Packages called "util" are suspect. Instead, + derive a name that describes your desired function. For example, the utility + functions dealing with waiting for operations are in the "wait" package and + include functionality like Poll. So the full name is wait.Poll +- All filenames should be lowercase +- Go source files and directories use underscores, not dashes + + - Package directories should generally avoid using separators as much as + possible (when packages are multiple words, they usually should be in nested + subdirectories). +- Document directories and filenames should use dashes rather than underscores +- Contrived examples that illustrate system features belong in + `/docs/user-guide` or `/docs/admin`, depending on whether it is a feature primarily + intended for users that deploy applications or cluster administrators, + respectively. Actual application examples belong in /examples. 
+ + - Examples should also illustrate [best practices for configuration and using the system](https://kubernetes.io/docs/concepts/configuration/overview/) +- Third-party code + + - Go code for normal third-party dependencies is managed using + [go modules](https://github.com/golang/go/wiki/Modules) + - Other third-party code belongs in `/third_party` + + - forked third party Go code goes in `/third_party/forked` + - forked _golang stdlib_ code goes in `/third_party/forked/golang` + - Third-party code must include licenses + - This includes modified third-party code and excerpts, as well + +## Linting and Formatting + +To ensure consistency across the Go codebase, we require all code to pass a number of linter checks. + +To run all linters, use the `lint` Makefile target: + +```shell +make lint +``` + +The command will clean code along with some lint checks. Please remember to check in all changes after that. diff --git a/docs/karpor/4-developer-guide/2-conventions/3-test-conventions.md b/docs/karpor/4-developer-guide/2-conventions/3-test-conventions.md new file mode 100644 index 00000000..9eb0e2e4 --- /dev/null +++ b/docs/karpor/4-developer-guide/2-conventions/3-test-conventions.md @@ -0,0 +1,267 @@ +--- +title: Test Conventions +--- +## Testing Principles + +In Karpor, we primarily focus on the following three types of tests: + +- Unit tests: Tests targeting the **smallest testable units** (such as functions, methods, utility classes, etc.) +- Integration tests: Tests targeting the interaction and integration between **multiple units (or modules)** +- End-to-End tests (e2e tests): Tests targeting the **entire system's behavior**, usually requiring the simulation of real user scenarios + +Each has its strengths, weaknesses, and suitable scenarios. To achieve a better development experience, we should adhere to the following principles when writing tests. + +**Testing principles**: + +- A case should only cover one scenario +- Follow the **7-2-1 principle**, i.e., 70% unit tests, 20% integration tests, and 10% end-to-end tests +- **Avoid using Mock frameworks in unit tests unless necessary** (e.g., `golang/mock`). If you feel the need to use a Mock framework in unit tests, what you actually need might be integration tests or even end-to-end tests + +## Technology Selection + +At the current point in time, the most popular testing frameworks in the Go language ecosystem are [Ginkgo](https://onsi.github.io/ginkgo/)/[Gomega](https://onsi.github.io/gomega/) and [Testify](https://github.com/stretchr/testify). Therefore, this section mainly discusses the characteristics, pros and cons, and the final selection of these two testing frameworks. + +### Ginkgo/Gomega + +**Advantages**: + +1. **BDD Support**: Ginkgo is favored by many developers for its support of Behavior-Driven Development (BDD) style. It offers a rich DSL syntax, making test cases more descriptive and readable through keywords like `Describe`, `Context`, `It`, etc. +2. **Parallel Execution**: Ginkgo can execute tests in parallel across different processes, improving the efficiency of test execution. +3. **Rich Matchers**: Used in conjunction with the Gomega matchers library, it provides a wealth of assertion capabilities, making tests more intuitive and convenient. +4. **Asynchronous Support**: Ginkgo provides native support for handling complex asynchronous scenarios, reducing the risk of deadlocks and timeouts. +5. **Test Case Organization**: Supports organizing test cases into Suites for easy management and expansion. +6. 
**Documentation**: Ginkgo's [official documentation](http://onsi.github.io/ginkgo/) is very detailed, offering guides from beginner to advanced usage. + +**Disadvantages**: + +1. **Learning Curve:** For developers not familiar with BDD, Ginkgo's DSL syntax may take some time to get used to. +2. **Complexity in Parallel Testing:** Although Ginkgo supports parallel execution, managing resources and environment for parallel tests can introduce additional complexity in some cases. + +### Testify + +**Advantages**: + +1. **Simplified API**: Testify provides a simple and intuitive API, easy to get started with, especially for developers accustomed to the `testing` package. +2. **Mock Support**: Testify offers powerful Mock functionalities, facilitating the simulation of dependencies and interfaces for unit testing. +3. **Table-Driven Tests**: Supports table-driven testing, allowing for easy provision of various inputs and expected outputs for the same test function, enhancing test case reusability. +4. **Compatibility with `testing` Package**: Testify is highly compatible with the Go standard library's `testing` package, allowing for seamless integration into existing testing workflows. +5. Documentation: Testify's [official documentation](https://pkg.go.dev/github.com/stretchr/testify) also provides rich introductions on how to use its assertion and mocking functionalities. + +**Disadvantages**: + +1. **Lack of BDD Support**: Testify does not support the BDD style, potentially less intuitive for developers looking to improve test case readability. +2. **Relatively Simple Features**: Compared to Ginkgo, Testify's features are relatively simple and may not meet some complex testing scenarios' requirements. + +### Summary + +In short, Ginkgo/Gomega offers better readability and maintainability, producing clean and clear tests, but with a higher learning curve requiring familiarity with BDD style. Testify is simpler, more practical, with a lower learning curve, but as time progresses, the testing code style may become more varied, lowering maintainability. + +Considering the actual situation of Karpor and the pros and cons of both frameworks, we decide to use these two frameworks in combination: + +- Use Testify for unit testing, adhering to [Table-Driven Testing](https://go.dev/wiki/TableDrivenTests) to constrain the code style and prevent decay; +- Utilize Ginkgo's BDD features for writing higher-level integration and end-to-end tests; + +This combination fully leverages the strengths of both frameworks, improving the overall efficiency, readability, and quality of testing. + +## Writing Specifications + +### Test Style + +[Table-Driven Testing](https://go.dev/wiki/TableDrivenTests) is a best practice for writing test cases, akin to design patterns in programming, and it is also the style recommended by the official Go language. Table-Driven Testing uses tables to provide a variety of inputs and expected outputs, allowing the same test function to verify different scenarios. The advantages of this method are that it increases the reusability of test cases, reduces repetitive code, and makes tests clearer and easier to maintain. + +While there is no direct syntax support for Table-Driven Testing in Go's `testing` package, it can be emulated by writing helper functions and using anonymous functions. 
+ +Here is an example of Table-Driven Testing implemented in the Go standard library's `fmt` package: + +```go +var flagtests = []struct { + in string + out string +}{ + {"%a", "[%a]"}, + {"%-a", "[%-a]"}, + {"%+a", "[%+a]"}, + {"%#a", "[%#a]"}, + {"% a", "[% a]"}, + {"%0a", "[%0a]"}, + {"%1.2a", "[%1.2a]"}, + {"%-1.2a", "[%-1.2a]"}, + {"%+1.2a", "[%+1.2a]"}, + {"%-+1.2a", "[%+-1.2a]"}, + {"%-+1.2abc", "[%+-1.2a]bc"}, + {"%-1.2abc", "[%-1.2a]bc"}, +} +func TestFlagParser(t *testing.T) { + var flagprinter flagPrinter + for _, tt := range flagtests { + t.Run(tt.in, func(t *testing.T) { + s := Sprintf(tt.in, &flagprinter) + if s != tt.out { + t.Errorf("got %q, want %q", s, tt.out) + } + }) + } +} +``` + +It is worth noting that most mainstream IDEs have already integrated [gotests](https://github.com/cweill/gotests), enabling the automatic generation of table-driven style Go unit tests, which I believe can enhance the efficiency of writing your unit tests: + +- [GoLand](https://blog.jetbrains.com/go/2020/03/13/test-driven-development-with-goland/) +- [Visual Studio Code](https://juandes.com/go-test-vsc/) + +### File Naming + +- **Specification Content**:Test files should end with `_test.go` to distinguish between test code and production code. +- **Positive Example**:`xxx_test.go` +- **Negative Example**:`testFile.go`、`test_xxx.go` + +### Test Function Naming + +- **Specification**: The name of the test function should start with `Test`, followed by the name of the function being tested, using camel case notation. +- **Positive Example**: + ```go + func TestAdd(t *testing.T) { + // Test logic ... + } + ``` +- **Negative Example**: + ```go + func TestAddWrong(t *testing.T) { + // Test logic ... + } + ``` + +### Test Function Signature + +- **Specification Content**: The signature of the test function should be `func TestXxx(t *testing.T)`, where `t` is the test object, of type `*testing.T`, and there should be no other parameters or return values. +- **Positive Example**: + ```go + func TestSubtraction(t *testing.T) { + // Test logic ... + } + ``` +- **Negative Example**: + ```go + func TestSubtraction(value int) { + // Test logic ... + } + ``` + +### Test Organization + +- **Specification Content**:Test cases should be independent of each other to avoid mutual influence between tests; use sub-tests (`t.Run`) to organize complex test scenarios. +- **Positive Example**: + ```go + func TestMathOperations(t *testing.T) { + t.Run("Addition", func(t *testing.T) { + // Test addition logic ... + }) + t.Run("Subtraction", func(t *testing.T) { + // Test subtraction logic ... + }) + } + ``` +- **Negative Example**: + ```go + func TestMathOperations(t *testing.T) { + // Mixed addition and subtraction logic... + } + ``` + +### Test Coverage + +- **Specification Content**:Attention should be paid to test coverage, use the `go test -cover` command to examine the test coverage of the code. +- **Positive Example**: + + ```shell + $ go test -cover + ``` +- **Negative Example**: + + ```shell + $ go test # Without checking test coverage + ``` +- **Note**: Karpor has wrapped this command as `make cover`, which will output the coverage for each package and total coverage in the command line. If you would like to view the coverage report in the browser, please execute `make cover-html`. + +### Benchmark Tests + +- **Specification Content**:Benchmark test functions should start with `Benchmark` and accept an argument of type `*testing.B`, focusing on performance testing. 
+- **Positive Example**: + ```go + func BenchmarkAdd(b *testing.B) { + for i := 0; i < b.N; i++ { + add(1, 1) + } + } + ``` +- **Negative Example**: + ```go + func BenchmarkAddWrong(b *testing.B) { + for i := 0; i < 1000; i++ { + add(1, 1) + } + } + ``` + +### Concurrency Testing + +- **Specification Content**:For concurrent code, appropriate test cases should be written to ensure the correctness of the concurrency logic. +- **Positive Example**: + ```go + func TestConcurrentAccess(t *testing.T) { + // Set up concurrent environment ... + // Test logic for concurrent access ... + } + ``` +- **Negative Example**: + ```go + func TestConcurrentAccess(t *testing.T) { + // Only test single-thread logic... + } + ``` + +### Test Helper Functions + +- **Specification Content**:Helper functions can be defined within the test files to help set up the test environment or clean up resources. +- **Positive Example**: + ```go + func setupTest(t *testing.T) { + // Set up test environment ... + } + + func tearDownTest(t *testing.T) { + // Clean up resources ... + } + + func TestMyFunction(t *testing.T) { + t.Run("TestSetup", func(t *testing.T) { + setupTest(t) + // Test logic ... + }) + } + ``` +- **Negative Example**: + ```go + // Directly setting up and cleaning up resources in the test + func TestMyFunction(t *testing.T) { + // Set up test environment... + // Test logic... + // Clean up resources... + } + ``` + +### Avoid Using Global Variables + +- **Specification Content**: Try to avoid using global variables in tests to ensure test independence. +- **Positive Example**: Declare and use the necessary variables inside the test function. +- **Negative Example**: Declare global variables at the top of the test file. + +### Clear Error Messages + +- **Specification Content**: When a test fails, output clear and understandable error messages to help developers locate the problem. +- **Positive Example**: + - `t.Errorf("Expected value %d, but got %d", expected, real)` +- **Negative Example**: + - `t.Errorf("Error occurred")` + - `fmt.Println("Error occurred")` + - `panic("Error occurred")` diff --git a/docs/karpor/4-developer-guide/2-conventions/4-commit-conventions.md b/docs/karpor/4-developer-guide/2-conventions/4-commit-conventions.md new file mode 100644 index 00000000..fd3980e3 --- /dev/null +++ b/docs/karpor/4-developer-guide/2-conventions/4-commit-conventions.md @@ -0,0 +1,71 @@ +--- +title: Commit Conventions +--- +## Commit Message Structure + +Karpor adheres to [conventional-commits](https://www.conventionalcommits.org/en/v1.0.0/). + +Commit messages should be organized following this structure: + +``` +[optional scope]: + +[optional body] +``` + +## Example + +Commit message with scope: + +``` +feat(lang): add polish language +``` + +Commit message without body: + +``` +docs: correct spelling of CHANGELOG +``` + +Commit message with multiple body paragraphs:: + +``` +fix: correct minor typos in code + +see the issue for details + +on typos fixed. + +reviewed-by: Z +refs #133 +``` + +## ``(Required) + +The required type helps better capture the area of the commit, based on the [Angular guidelines](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines). + +We use lowercase for `` to avoid spending time on case-sensitive issues. 
`` can be one of the following: + +- **feat**: A new feature +- **fix**: A bug fix +- **docs**: Documentation only changes +- **build**: Changes that affect the build system or external dependencies +- **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) +- **refactor**: A code change that neither fixes a bug nor adds a feature +- **perf**: A code change that improves performance +- **test**: Adding missing tests or correcting existing tests +- **chore**: Changes to the build process or auxiliary tools and libraries such as documentation generation + +## ``(Optional) + +Scope is optional and can be provided to the type of commit to provide additional contextual information, enclosed in parentheses. It can be anything specifying the place of the commit change. Github issue links are also valid scopes e.g., fix(ui), feat(api), fix(#233), etc. + +When the change affects multiple scopes, `*` can be used. + +## ``(Required) + +The subject must come immediately after the type/scope prefix, followed by a colon and space. It is a concise summary of the code changes, for example, "fix: array parsing issue when multiple spaces were contained in string", rather than "fix: bug". + +## ``(Required) + +A longer commit body can be provided after the brief subject, giving additional context information about the code change. The body must begin one line after the description. diff --git a/docs/karpor/4-developer-guide/2-conventions/_category_.json b/docs/karpor/4-developer-guide/2-conventions/_category_.json new file mode 100644 index 00000000..3287fa06 --- /dev/null +++ b/docs/karpor/4-developer-guide/2-conventions/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Conventions" +} diff --git a/docs/karpor/4-developer-guide/_category_.json b/docs/karpor/4-developer-guide/_category_.json new file mode 100644 index 00000000..8de262b6 --- /dev/null +++ b/docs/karpor/4-developer-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Guide" +} diff --git a/docs/karpor/5-references/1-cli-commands/1-karpor.md b/docs/karpor/5-references/1-cli-commands/1-karpor.md new file mode 100644 index 00000000..d68d355b --- /dev/null +++ b/docs/karpor/5-references/1-cli-commands/1-karpor.md @@ -0,0 +1,230 @@ +--- +title: karpor +--- +### Synopsis + +Launch an API server + +``` +karpor [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --api-audiences strings Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. + --audit-log-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. 
(default 10000) + --audit-log-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 1) + --audit-log-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. + --audit-log-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. + --audit-log-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. + --audit-log-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. + --audit-log-compress If set, the rotated log files will be compressed using gzip. + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "blocking") + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-log-truncate-enabled Whether event and batch truncating is enabled. + --audit-log-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-log-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-log-version string API group and version used for serializing audit events written to log. (default "audit.k8s.io/v1") + --audit-policy-file string Path to the file that defines the audit policy configuration. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-webhook-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. (default true) + --audit-webhook-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a KubeConfig formatted file that defines the audit webhook configuration. 
+ --audit-webhook-initial-backoff duration The amount of time to wait before retrying the first failed request. (default 10s) + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "batch") + --audit-webhook-truncate-enabled Whether event and batch truncating is enabled. + --audit-webhook-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-webhook-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-webhook-version string API group and version used for serializing audit events written to webhook. (default "audit.k8s.io/v1") + --authorization-mode strings Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default [RBAC]) + --authorization-policy-file string File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in KubeConfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --authorization-webhook-version string The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook. (default "v1beta1") + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. (default [.*]) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. 
(default 1) + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. (default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook,ValidatingAdmissionPolicy]) + --egress-selector-config-file string File with apiserver egress selector configuration. + --elastic-search-addresses strings The elastic search address + --elastic-search-password string The elastic search password + --elastic-search-username string The elastic search username + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-priority-and-fairness If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness (default true) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --encryption-provider-config-automatic-reload Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints. + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-count-metric-poll-period duration Frequency of polling etcd for number of resources per type. 0 disables the metric collection. (default 1m0s) + --etcd-db-metric-poll-interval duration The interval of requests to poll etcd and update metric. 0 disables the metric collection (default 30s) + --etcd-healthcheck-timeout duration The timeout to use when checking etcd health. (default 2s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry/karpor") + --etcd-readycheck-timeout duration The timeout to use when checking etcd readiness (default 2s) + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary. + --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). 
+ --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: + APIListChunking=true|false (BETA - default=true) + APIPriorityAndFairness=true|false (BETA - default=true) + APIResponseCompression=true|false (BETA - default=true) + APISelfSubjectReview=true|false (ALPHA - default=false) + APIServerIdentity=true|false (BETA - default=true) + APIServerTracing=true|false (ALPHA - default=false) + AggregatedDiscoveryEndpoint=true|false (ALPHA - default=false) + AllAlpha=true|false (ALPHA - default=false) + AllBeta=true|false (BETA - default=false) + AnyVolumeDataSource=true|false (BETA - default=true) + AppArmor=true|false (BETA - default=true) + CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + CPUManagerPolicyBetaOptions=true|false (BETA - default=true) + CPUManagerPolicyOptions=true|false (BETA - default=true) + CSIMigrationPortworx=true|false (BETA - default=false) + CSIMigrationRBD=true|false (ALPHA - default=false) + CSINodeExpandSecret=true|false (ALPHA - default=false) + CSIVolumeHealth=true|false (ALPHA - default=false) + ComponentSLIs=true|false (ALPHA - default=false) + ContainerCheckpoint=true|false (ALPHA - default=false) + CronJobTimeZone=true|false (BETA - default=true) + CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false) + CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false) + CustomResourceValidationExpressions=true|false (BETA - default=true) + DisableCloudProviders=true|false (ALPHA - default=false) + DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false) + DownwardAPIHugePages=true|false (BETA - default=true) + DynamicResourceAllocation=true|false (ALPHA - default=false) + EventedPLEG=true|false (ALPHA - default=false) + ExpandedDNSConfig=true|false (BETA - default=true) + ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) + GRPCContainerProbe=true|false (BETA - default=true) + GracefulNodeShutdown=true|false (BETA - default=true) + GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true) + HPAContainerMetrics=true|false (ALPHA - default=false) + HPAScaleToZero=true|false (ALPHA - default=false) + HonorPVReclaimPolicy=true|false (ALPHA - default=false) + IPTablesOwnershipCleanup=true|false (ALPHA - default=false) + InTreePluginAWSUnregister=true|false (ALPHA - default=false) + InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false) + InTreePluginAzureFileUnregister=true|false (ALPHA - default=false) + InTreePluginGCEUnregister=true|false (ALPHA - default=false) + InTreePluginOpenStackUnregister=true|false (ALPHA - default=false) + InTreePluginPortworxUnregister=true|false (ALPHA - default=false) + InTreePluginRBDUnregister=true|false (ALPHA - default=false) + InTreePluginvSphereUnregister=true|false (ALPHA - default=false) + JobMutableNodeSchedulingDirectives=true|false (BETA - default=true) + JobPodFailurePolicy=true|false (BETA - default=true) + JobReadyPods=true|false (BETA - default=true) + KMSv2=true|false (ALPHA - default=false) + KubeletInUserNamespace=true|false (ALPHA - default=false) + KubeletPodResources=true|false (BETA - default=true) + KubeletPodResourcesGetAllocatable=true|false (BETA - default=true) + KubeletTracing=true|false (ALPHA - default=false) + LegacyServiceAccountTokenTracking=true|false (ALPHA - default=false) + LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) + LogarithmicScaleDown=true|false (BETA - default=true) + 
MatchLabelKeysInPodTopologySpread=true|false (ALPHA - default=false) + MaxUnavailableStatefulSet=true|false (ALPHA - default=false) + MemoryManager=true|false (BETA - default=true) + MemoryQoS=true|false (ALPHA - default=false) + MinDomainsInPodTopologySpread=true|false (BETA - default=false) + MinimizeIPTablesRestore=true|false (ALPHA - default=false) + MultiCIDRRangeAllocator=true|false (ALPHA - default=false) + NetworkPolicyStatus=true|false (ALPHA - default=false) + NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true) + NodeOutOfServiceVolumeDetach=true|false (BETA - default=true) + NodeSwap=true|false (ALPHA - default=false) + OpenAPIEnums=true|false (BETA - default=true) + OpenAPIV3=true|false (BETA - default=true) + PDBUnhealthyPodEvictionPolicy=true|false (ALPHA - default=false) + PodAndContainerStatsFromCRI=true|false (ALPHA - default=false) + PodDeletionCost=true|false (BETA - default=true) + PodDisruptionConditions=true|false (BETA - default=true) + PodHasNetworkCondition=true|false (ALPHA - default=false) + PodSchedulingReadiness=true|false (ALPHA - default=false) + ProbeTerminationGracePeriod=true|false (BETA - default=true) + ProcMountType=true|false (ALPHA - default=false) + ProxyTerminatingEndpoints=true|false (BETA - default=true) + QOSReserved=true|false (ALPHA - default=false) + ReadWriteOncePod=true|false (ALPHA - default=false) + RecoverVolumeExpansionFailure=true|false (ALPHA - default=false) + RemainingItemCount=true|false (BETA - default=true) + RetroactiveDefaultStorageClass=true|false (BETA - default=true) + RotateKubeletServerCertificate=true|false (BETA - default=true) + SELinuxMountReadWriteOncePod=true|false (ALPHA - default=false) + SeccompDefault=true|false (BETA - default=true) + ServerSideFieldValidation=true|false (BETA - default=true) + SizeMemoryBackedVolumes=true|false (BETA - default=true) + StatefulSetAutoDeletePVC=true|false (ALPHA - default=false) + StatefulSetStartOrdinal=true|false (ALPHA - default=false) + StorageVersionAPI=true|false (ALPHA - default=false) + StorageVersionHash=true|false (BETA - default=true) + TopologyAwareHints=true|false (BETA - default=true) + TopologyManager=true|false (BETA - default=true) + TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + TopologyManagerPolicyBetaOptions=true|false (BETA - default=false) + TopologyManagerPolicyOptions=true|false (ALPHA - default=false) + UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false) + ValidatingAdmissionPolicy=true|false (ALPHA - default=false) + VolumeCapacityPriority=true|false (ALPHA - default=false) + WinDSR=true|false (ALPHA - default=false) + WinOverlay=true|false (BETA - default=true) + WindowsHostNetwork=true|false (ALPHA - default=true) (default APIPriorityAndFairness=true) + --goaway-chance float To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. + -h, --help help for karpor + --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. 
Zero means to use golang's default. (default 1000) + --lease-reuse-duration-seconds int The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer. (default 60) + --livez-grace-period duration This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true. + --max-mutating-requests-inflight int This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely. (default 200) + --max-requests-inflight int This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false] + --permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false] + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --read-only-mode turn on the read only mode + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests. + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. + --search-storage-type string The search storage type + --secure-port int The port on which to serve HTTPS with authentication and authorization. 
If 0, don't serve HTTPS at all. (default 443) + --shutdown-delay-duration duration Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server. + --shutdown-send-retry-after If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle. + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default). + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. Supported media types: [application/json, application/yaml, application/vnd.kubernetes.protobuf] (default "application/json") + --strict-transport-security-directives strings List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload' + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. (default "apiserver.local.config/certificates/apiserver.crt") + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. + Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. + Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. + --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13 + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. (default "apiserver.local.config/certificates/apiserver.key") + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. 
The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --tracing-config-file string File with apiserver tracing configuration. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. This option is only meaningful for resources built into the apiserver, not ones defined by CRDs or aggregated from external servers, and is only consulted if the watch-cache is enabled. The only meaningful size setting to supply here is zero, which means to disable watch caching for the associated resource; all non-zero values are equivalent and mean to not disable watch caching for that resource +``` + +### SEE ALSO + +* [karpor syncer](2-karpor-syncer.md) - start a resource syncer to sync resource from clusters + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/docs/karpor/5-references/1-cli-commands/2-karpor-syncer.md b/docs/karpor/5-references/1-cli-commands/2-karpor-syncer.md new file mode 100644 index 00000000..d25245ae --- /dev/null +++ b/docs/karpor/5-references/1-cli-commands/2-karpor-syncer.md @@ -0,0 +1,25 @@ +--- +title: karpor syncer +--- +## karpor syncer + +start a resource syncer to sync resource from clusters + +``` +karpor syncer [flags] +``` + +### Options + +``` + --elastic-search-addresses strings The elastic search address. + --health-probe-bind-address string The address the probe endpoint binds to. (default ":8081") + -h, --help help for syncer + --metrics-bind-address string The address the metric endpoint binds to. 
(default ":8080") +``` + +### SEE ALSO + +* [karpor](1-karpor.md) - Launch an API server + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/docs/karpor/5-references/1-cli-commands/_category_.json b/docs/karpor/5-references/1-cli-commands/_category_.json new file mode 100644 index 00000000..41757f5f --- /dev/null +++ b/docs/karpor/5-references/1-cli-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "CLI Commands" +} diff --git a/docs/karpor/5-references/2-openapi.md b/docs/karpor/5-references/2-openapi.md new file mode 100644 index 00000000..81c0321d --- /dev/null +++ b/docs/karpor/5-references/2-openapi.md @@ -0,0 +1,1862 @@ +--- +title: OpenAPI +--- +## Informations + +### Version + +1.0 + +### Contact + +## Content negotiation + +### URI Schemes + +* http + +### Consumes + +* application/json +* multipart/form-data +* text/plain + +### Produces + +* application/json +* text/plain + +## All endpoints + +### cluster + +| Method | URI | Name | Summary | +| ------ | ------------------------------------ | ------------------------------------------------------------------------------------- | -------------------------------------------- | +| DELETE | /rest-api/v1/cluster/{clusterName} | [delete rest API v1 cluster cluster name](#delete-rest-api-v1-cluster-cluster-name) | Delete removes a cluster resource by name. | +| GET | /rest-api/v1/cluster/{clusterName} | [get rest API v1 cluster cluster name](#get-rest-api-v1-cluster-cluster-name) | Get returns a cluster resource by name. | +| GET | /rest-api/v1/clusters | [get rest API v1 clusters](#get-rest-api-v1-clusters) | List lists all cluster resources. | +| POST | /rest-api/v1/cluster/{clusterName} | [post rest API v1 cluster cluster name](#post-rest-api-v1-cluster-cluster-name) | Create creates a cluster resource. | +| POST | /rest-api/v1/cluster/config/file | [post rest API v1 cluster config file](#post-rest-api-v1-cluster-config-file) | Upload kubeConfig file for cluster | +| POST | /rest-api/v1/cluster/config/validate | [post rest API v1 cluster config validate](#post-rest-api-v1-cluster-config-validate) | Validate KubeConfig | +| PUT | /rest-api/v1/cluster/{clusterName} | [put rest API v1 cluster cluster name](#put-rest-api-v1-cluster-cluster-name) | Update updates the cluster metadata by name. | + +### debug + +| Method | URI | Name | Summary | +| ------ | ---------- | ------------------------------- | ---------------------------- | +| GET | /endpoints | [get endpoints](#get-endpoints) | List all available endpoints | + +### insight + +| Method | URI | Name | Summary | +| ------ | ----------------------------- | --------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/insight/audit | [get rest API v1 insight audit](#get-rest-api-v1-insight-audit) | Audit based on resource group. | +| GET | /rest-api/v1/insight/detail | [get rest API v1 insight detail](#get-rest-api-v1-insight-detail) | GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/events | [get rest API v1 insight events](#get-rest-api-v1-insight-events) | GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/score | [get rest API v1 insight score](#get-rest-api-v1-insight-score) | ScoreHandler calculates a score for the audited manifest. 
| +| GET | /rest-api/v1/insight/stats | [get rest API v1 insight stats](#get-rest-api-v1-insight-stats) | Get returns a global statistics info. | +| GET | /rest-api/v1/insight/summary | [get rest API v1 insight summary](#get-rest-api-v1-insight-summary) | Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/topology | [get rest API v1 insight topology](#get-rest-api-v1-insight-topology) | GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | + +### resourcegroup + +| Method | URI | Name | Summary | +| ------ | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | +| GET | /rest-api/v1/resource-groups/{resourceGroupRuleName} | [get rest API v1 resource groups resource group rule name](#get-rest-api-v1-resource-groups-resource-group-rule-name) | List lists all ResourceGroups by rule name. | + +### resourcegrouprule + +| Method | URI | Name | Summary | +| ------ | -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | +| DELETE | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [delete rest API v1 resource group rule resource group rule name](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name) | Delete removes a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [get rest API v1 resource group rule resource group rule name](#get-rest-api-v1-resource-group-rule-resource-group-rule-name) | Get returns a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rules | [get rest API v1 resource group rules](#get-rest-api-v1-resource-group-rules) | List lists all ResourceGroupRules. | +| POST | /rest-api/v1/resource-group-rule | [post rest API v1 resource group rule](#post-rest-api-v1-resource-group-rule) | Create creates a ResourceGroupRule. | +| PUT | /rest-api/v1/resource-group-rule | [put rest API v1 resource group rule](#put-rest-api-v1-resource-group-rule) | Update updates the ResourceGroupRule metadata by name. | + +### search + +| Method | URI | Name | Summary | +| ------ | ------------------- | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/search | [get rest API v1 search](#get-rest-api-v1-search) | SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. | + +## Paths + +### Delete removes a cluster resource by name. (*DeleteRestAPIV1ClusterClusterName*) + +``` +DELETE /rest-api/v1/cluster/{clusterName} +``` + +This endpoint deletes the cluster resource by name. 
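+
+As a non-authoritative illustration, the Go sketch below issues this request; the base URL `https://karpor.example.com` and the cluster name `example-cluster` are placeholders, and authentication/TLS setup is intentionally omitted:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+)
+
+func main() {
+	// Placeholder base URL and cluster name; adjust to your deployment.
+	url := "https://karpor.example.com/rest-api/v1/cluster/example-cluster"
+
+	req, err := http.NewRequest(http.MethodDelete, url, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	// A 200 response indicates the cluster resource was removed.
+	fmt.Println("status:", resp.Status)
+}
+```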
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------- | +| [200](#delete-rest-api-v1-cluster-cluster-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#delete-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#delete-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#delete-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#delete-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#delete-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#delete-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Delete removes a ResourceGroupRule by name. (*DeleteRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +DELETE /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint deletes the ResourceGroupRule by name. 
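+
+Purely as a sketch (the server address and the rule name `example-rule` are hypothetical, and authentication is not shown), the request can be issued from Go like this:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"net/http"
+)
+
+func main() {
+	// Hypothetical endpoint; replace with your Karpor server and rule name.
+	url := "https://karpor.example.com/rest-api/v1/resource-group-rule/example-rule"
+
+	req, err := http.NewRequestWithContext(context.Background(), http.MethodDelete, url, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	// 404 means no ResourceGroupRule with that name exists.
+	if resp.StatusCode == http.StatusNotFound {
+		fmt.Println("rule not found")
+		return
+	}
+	fmt.Println("status:", resp.Status)
+}
+```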
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------------------------------- | +| [200](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List all available endpoints (*GetEndpoints*) + +``` +GET /endpoints +``` + +List all registered endpoints in the router + +#### Consumes + +* text/plain + +#### Produces + +* text/plain + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------- | ------ | ----------------------------- | :---------: | ----------------------------------- | +| [200](#get-endpoints-200) | OK | Endpoints listed successfully | | [schema](#get-endpoints-200-schema) | + +#### Responses + +##### 200 - Endpoints listed successfully + +Status: OK + +###### Schema + +### Get returns a cluster resource by name. (*GetRestAPIV1ClusterClusterName*) + +``` +GET /rest-api/v1/cluster/{clusterName} +``` + +This endpoint returns a cluster resource by name. 
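+
+A minimal Go sketch of this call is shown below; the base URL and cluster name are placeholders, the optional `format` query parameter documented below is left at its JSON default, and error handling is kept to the essentials:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+)
+
+func main() {
+	// Placeholder base URL and cluster name; adapt to your environment.
+	url := "https://karpor.example.com/rest-api/v1/cluster/example-cluster"
+
+	resp, err := http.Get(url)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		log.Fatalf("unexpected status: %s", resp.Status)
+	}
+
+	// The endpoint returns the cluster as an unstructured JSON object.
+	var obj map[string]interface{}
+	if err := json.NewDecoder(resp.Body).Decode(&obj); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("kind:", obj["kind"])
+}
+```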
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------- | ------ | -------- | --------- | :------: | ------- | -------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#get-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#get-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#get-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#get-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#get-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#get-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all cluster resources. (*GetRestAPIV1Clusters*) + +``` +GET /rest-api/v1/clusters +``` + +This endpoint lists all cluster resources. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the cluster. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------ | --------------------- | ----------------------- | :---------: | ---------------------------------------------- | +| [200](#get-rest-api-v1-clusters-200) | OK | List of cluster objects | | [schema](#get-rest-api-v1-clusters-200-schema) | +| [400](#get-rest-api-v1-clusters-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-clusters-400-schema) | +| [401](#get-rest-api-v1-clusters-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-clusters-401-schema) | +| [404](#get-rest-api-v1-clusters-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-clusters-404-schema) | +| [405](#get-rest-api-v1-clusters-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-clusters-405-schema) | +| [429](#get-rest-api-v1-clusters-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-clusters-429-schema) | +| [500](#get-rest-api-v1-clusters-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-clusters-500-schema) | + +#### Responses + +##### 200 - List of cluster objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Audit based on resource group. (*GetRestAPIV1InsightAudit*) + +``` +GET /rest-api/v1/insight/audit +``` + +This endpoint audits based on the specified resource group. 
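+
+As an illustrative sketch only, the Go snippet below builds the query string from the parameters listed in the table that follows; the server address, cluster and resource names are hypothetical:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Hypothetical server address; replace with your Karpor endpoint.
+	base := "https://karpor.example.com/rest-api/v1/insight/audit"
+
+	// Query parameters identifying the resource group to audit.
+	params := url.Values{}
+	params.Set("cluster", "example-cluster")
+	params.Set("apiVersion", "apps/v1")
+	params.Set("kind", "Deployment")
+	params.Set("namespace", "default")
+	params.Set("name", "foo")
+
+	resp, err := http.Get(base + "?" + params.Encode())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	// The audit results are returned as JSON.
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(resp.Status)
+	fmt.Println(string(body))
+}
+```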
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced scanning, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-audit-200) | OK | Audit results | | [schema](#get-rest-api-v1-insight-audit-200-schema) | +| [400](#get-rest-api-v1-insight-audit-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-audit-400-schema) | +| [401](#get-rest-api-v1-insight-audit-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-audit-401-schema) | +| [404](#get-rest-api-v1-insight-audit-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-audit-404-schema) | +| [429](#get-rest-api-v1-insight-audit-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-audit-429-schema) | +| [500](#get-rest-api-v1-insight-audit-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-audit-500-schema) | + +#### Responses + +##### 200 - Audit results + +Status: OK + +###### Schema + +[ScannerAuditData](#scanner-audit-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightDetail*) + +``` +GET /rest-api/v1/insight/detail +``` + +This endpoint returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml. 
Default to json | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-detail-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-insight-detail-200-schema) | +| [400](#get-rest-api-v1-insight-detail-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-detail-400-schema) | +| [401](#get-rest-api-v1-insight-detail-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-detail-401-schema) | +| [404](#get-rest-api-v1-insight-detail-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-detail-404-schema) | +| [405](#get-rest-api-v1-insight-detail-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-detail-405-schema) | +| [429](#get-rest-api-v1-insight-detail-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-detail-429-schema) | +| [500](#get-rest-api-v1-insight-detail-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-detail-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightEvents*) + +``` +GET /rest-api/v1/insight/events +``` + +This endpoint returns events for a Kubernetes resource YAML by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-events-200) | OK | List of events | | [schema](#get-rest-api-v1-insight-events-200-schema) | +| [400](#get-rest-api-v1-insight-events-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-events-400-schema) | +| [401](#get-rest-api-v1-insight-events-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-events-401-schema) | +| [404](#get-rest-api-v1-insight-events-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-events-404-schema) | +| [405](#get-rest-api-v1-insight-events-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-events-405-schema) | +| [429](#get-rest-api-v1-insight-events-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-events-429-schema) | +| [500](#get-rest-api-v1-insight-events-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-events-500-schema) | + +#### Responses + +##### 200 - List of events + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### ScoreHandler calculates a score for the audited manifest. (*GetRestAPIV1InsightScore*) + +``` +GET /rest-api/v1/insight/score +``` + +This endpoint calculates a score for the provided manifest based on the number and severity of issues detected during the audit. 
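+
+A similar sketch applies to the score endpoint; `${KARPOR_SERVER}` and the resource group values are placeholders, and `forceNew=true` switches on forced computation instead of reusing a previously stored score.
+
+```shell
+curl -sS -G "${KARPOR_SERVER}/rest-api/v1/insight/score" \
+  --data-urlencode "cluster=example-cluster" \
+  --data-urlencode "apiVersion=apps/v1" \
+  --data-urlencode "kind=Deployment" \
+  --data-urlencode "namespace=default" \
+  --data-urlencode "name=foo" \
+  --data-urlencode "forceNew=true"
+```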
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced compute score, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ------------------------ | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-score-200) | OK | Score calculation result | | [schema](#get-rest-api-v1-insight-score-200-schema) | +| [400](#get-rest-api-v1-insight-score-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-score-400-schema) | +| [401](#get-rest-api-v1-insight-score-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-score-401-schema) | +| [404](#get-rest-api-v1-insight-score-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-score-404-schema) | +| [429](#get-rest-api-v1-insight-score-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-score-429-schema) | +| [500](#get-rest-api-v1-insight-score-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-score-500-schema) | + +#### Responses + +##### 200 - Score calculation result + +Status: OK + +###### Schema + +[InsightScoreData](#insight-score-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a global statistics info. (*GetRestAPIV1InsightStats*) + +``` +GET /rest-api/v1/insight/stats +``` + +This endpoint returns a global statistics info. 
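+
+This endpoint takes no parameters, so a minimal call can be as simple as the sketch below (`${KARPOR_SERVER}` is a placeholder; `jq` is used only to pretty-print the JSON).
+
+```shell
+# The response follows the insight.Statistics model documented below.
+curl -sS "${KARPOR_SERVER}/rest-api/v1/insight/stats" | jq .
+```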
+ +#### Produces + +* application/json + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ---------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-stats-200) | OK | Global statistics info | | [schema](#get-rest-api-v1-insight-stats-200-schema) | +| [400](#get-rest-api-v1-insight-stats-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-stats-400-schema) | +| [401](#get-rest-api-v1-insight-stats-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-stats-401-schema) | +| [404](#get-rest-api-v1-insight-stats-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-stats-404-schema) | +| [405](#get-rest-api-v1-insight-stats-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-stats-405-schema) | +| [429](#get-rest-api-v1-insight-stats-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-stats-429-schema) | +| [500](#get-rest-api-v1-insight-stats-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-stats-500-schema) | + +#### Responses + +##### 200 - Global statistics info + +Status: OK + +###### Schema + +[InsightStatistics](#insight-statistics) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightSummary*) + +``` +GET /rest-api/v1/insight/summary +``` + +This endpoint returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------- | +| [200](#get-rest-api-v1-insight-summary-200) | OK | Resource Summary | | [schema](#get-rest-api-v1-insight-summary-200-schema) | +| [400](#get-rest-api-v1-insight-summary-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-summary-400-schema) | +| [401](#get-rest-api-v1-insight-summary-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-summary-401-schema) | +| [404](#get-rest-api-v1-insight-summary-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-summary-404-schema) | +| [405](#get-rest-api-v1-insight-summary-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-summary-405-schema) | +| [429](#get-rest-api-v1-insight-summary-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-summary-429-schema) | +| [500](#get-rest-api-v1-insight-summary-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-summary-500-schema) | + +#### Responses + +##### 200 - Resource Summary + +Status: OK + +###### Schema + +[InsightResourceSummary](#insight-resource-summary) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightTopology*) + +``` +GET /rest-api/v1/insight/topology +``` + +This endpoint returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Force re-generating the topology, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------- | --------------------- | -------------------------------------------- | :---------: | ------------------------------------------------------ | +| [200](#get-rest-api-v1-insight-topology-200) | OK | map from string to resource.ResourceTopology | | [schema](#get-rest-api-v1-insight-topology-200-schema) | +| [400](#get-rest-api-v1-insight-topology-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-topology-400-schema) | +| [401](#get-rest-api-v1-insight-topology-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-topology-401-schema) | +| [404](#get-rest-api-v1-insight-topology-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-topology-404-schema) | +| [405](#get-rest-api-v1-insight-topology-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-topology-405-schema) | +| [429](#get-rest-api-v1-insight-topology-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-topology-429-schema) | +| [500](#get-rest-api-v1-insight-topology-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-topology-500-schema) | + +#### Responses + +##### 200 - map from string to resource.ResourceTopology + +Status: OK + +###### Schema + +map of [InsightResourceTopology](#insight-resource-topology) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a ResourceGroupRule by name. (*GetRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint returns a ResourceGroupRule by name. 
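+
+For example, fetching a hypothetical rule named `namespace-rule` (server address and rule name are placeholders):
+
+```shell
+curl -sS "${KARPOR_SERVER}/rest-api/v1/resource-group-rule/namespace-rule"
+```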
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroupRules. (*GetRestAPIV1ResourceGroupRules*) + +``` +GET /rest-api/v1/resource-group-rules +``` + +This endpoint lists all ResourceGroupRules. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the resourceGroupRule. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rules-200) | OK | List of resourceGroupRule objects | | [schema](#get-rest-api-v1-resource-group-rules-200-schema) | +| [400](#get-rest-api-v1-resource-group-rules-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rules-400-schema) | +| [401](#get-rest-api-v1-resource-group-rules-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rules-401-schema) | +| [404](#get-rest-api-v1-resource-group-rules-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rules-404-schema) | +| [405](#get-rest-api-v1-resource-group-rules-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rules-405-schema) | +| [429](#get-rest-api-v1-resource-group-rules-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rules-429-schema) | +| [500](#get-rest-api-v1-resource-group-rules-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rules-500-schema) | + +#### Responses + +##### 200 - List of resourceGroupRule objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroups by rule name. (*GetRestAPIV1ResourceGroupsResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-groups/{resourceGroupRuleName} +``` + +This endpoint lists all ResourceGroups. 
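+
+A sketch of listing the resource groups produced by a hypothetical rule named `namespace-rule`:
+
+```shell
+curl -sS "${KARPOR_SERVER}/rest-api/v1/resource-groups/namespace-rule"
+```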
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------------------------------- | --------------------- | ----------------------------- | :---------: | ------------------------------------------------------------------------------ | +| [200](#get-rest-api-v1-resource-groups-resource-group-rule-name-200) | OK | List of resourceGroup objects | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-groups-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-groups-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-groups-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-groups-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-groups-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-groups-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - List of resourceGroup objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. (*GetRestAPIV1Search*) + +``` +GET /rest-api/v1/search +``` + +This endpoint returns an array of Kubernetes runtime Object matched using the query from context. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| -------- | ------- | ------ | -------- | --------- | :------: | ------- | ------------------------------------------------------ | +| page | `query` | string | `string` | | | | The current page to fetch. Default to 1 | +| pageSize | `query` | string | `string` | | | | The size of the page. Default to 10 | +| pattern | `query` | string | `string` | | ✓ | | The search pattern. Can be either sql or dsl. Required | +| query | `query` | string | `string` | | ✓ | | The query to use for search. 
Required | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------- | --------------------- | ----------------------- | :---------: | -------------------------------------------- | +| [200](#get-rest-api-v1-search-200) | OK | Array of runtime.Object | | [schema](#get-rest-api-v1-search-200-schema) | +| [400](#get-rest-api-v1-search-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-search-400-schema) | +| [401](#get-rest-api-v1-search-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-search-401-schema) | +| [404](#get-rest-api-v1-search-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-search-404-schema) | +| [405](#get-rest-api-v1-search-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-search-405-schema) | +| [429](#get-rest-api-v1-search-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-search-429-schema) | +| [500](#get-rest-api-v1-search-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-search-500-schema) | + +#### Responses + +##### 200 - Array of runtime.Object + +Status: OK + +###### Schema + +[][interface{}](#interface) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a cluster resource. (*PostRestAPIV1ClusterClusterName*) + +``` +POST /rest-api/v1/cluster/{clusterName} +``` + +This endpoint creates a new cluster resource using the payload. 
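+
+A minimal creation request might look like the following sketch; the cluster name, display name, description and inlined kubeconfig are hypothetical, and the payload fields follow the cluster.ClusterPayload model described later in this reference.
+
+```shell
+curl -sS -X POST "${KARPOR_SERVER}/rest-api/v1/cluster/example-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "displayName": "Example Cluster",
+        "description": "Cluster registered from the quick start",
+        "kubeconfig": "<contents of the kubeconfig file>"
+      }'
+```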
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#post-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#post-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#post-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#post-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#post-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#post-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Upload kubeConfig file for cluster (*PostRestAPIV1ClusterConfigFile*) + +``` +POST /rest-api/v1/cluster/config/file +``` + +Uploads a KubeConfig file for cluster, with a maximum size of 2MB. 
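+
+Because this endpoint consumes `multipart/form-data`, a sketch of the upload with curl could look like this (file path, names and description are placeholders):
+
+```shell
+curl -sS -X POST "${KARPOR_SERVER}/rest-api/v1/cluster/config/file" \
+  -F "file=@/path/to/kubeconfig" \
+  -F "name=example-cluster" \
+  -F "displayName=Example Cluster" \
+  -F "description=Cluster registered via file upload"
+```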
+ +#### Consumes + +* multipart/form-data + +#### Produces + +* text/plain + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ---------- | ------ | --------------- | --------- | :------: | ------- | ---------------------------------- | +| description | `formData` | string | `string` | | ✓ | | cluster description | +| displayName | `formData` | string | `string` | | ✓ | | cluster display name | +| file | `formData` | file | `io.ReadCloser` | | ✓ | | Upload file with field name 'file' | +| name | `formData` | string | `string` | | ✓ | | cluster name | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-file-200) | OK | Returns the content of the uploaded KubeConfig file. | | [schema](#post-rest-api-v1-cluster-config-file-200-schema) | +| [400](#post-rest-api-v1-cluster-config-file-400) | Bad Request | The uploaded file is too large or the request is invalid. | | [schema](#post-rest-api-v1-cluster-config-file-400-schema) | +| [500](#post-rest-api-v1-cluster-config-file-500) | Internal Server Error | Internal server error. | | [schema](#post-rest-api-v1-cluster-config-file-500-schema) | + +#### Responses + +##### 200 - Returns the content of the uploaded KubeConfig file. + +Status: OK + +###### Schema + +[ClusterUploadData](#cluster-upload-data) + +##### 400 - The uploaded file is too large or the request is invalid. + +Status: Bad Request + +###### Schema + +##### 500 - Internal server error. + +Status: Internal Server Error + +###### Schema + +### Validate KubeConfig (*PostRestAPIV1ClusterConfigValidate*) + +``` +POST /rest-api/v1/cluster/config/validate +``` + +Validates the provided KubeConfig using cluster manager methods. 
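+
+A hedged example of validation; the `kubeConfig` value is a placeholder for the sanitized content returned by the upload endpoint above, or any kubeconfig string you want to check.
+
+```shell
+curl -sS -X POST "${KARPOR_SERVER}/rest-api/v1/cluster/config/validate" \
+  -H "Content-Type: application/json" \
+  -d '{"kubeConfig": "<kubeconfig content to validate>"}'
+```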
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | --------------------------------------------------- | ------------------------------- | --------- | :------: | ------- | ------------------------------ | +| request | `body` | [ClusterValidatePayload](#cluster-validate-payload) | `models.ClusterValidatePayload` | | ✓ | | KubeConfig payload to validate | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------------------------- | --------------------- | ---------------------------------- | :---------: | -------------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-validate-200) | OK | Verification passed server version | | [schema](#post-rest-api-v1-cluster-config-validate-200-schema) | +| [400](#post-rest-api-v1-cluster-config-validate-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-config-validate-400-schema) | +| [401](#post-rest-api-v1-cluster-config-validate-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-config-validate-401-schema) | +| [404](#post-rest-api-v1-cluster-config-validate-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-config-validate-404-schema) | +| [429](#post-rest-api-v1-cluster-config-validate-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-config-validate-429-schema) | +| [500](#post-rest-api-v1-cluster-config-validate-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-config-validate-500-schema) | + +#### Responses + +##### 200 - Verification passed server version + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a ResourceGroupRule. (*PostRestAPIV1ResourceGroupRule*) + +``` +POST /rest-api/v1/resource-group-rule +``` + +This endpoint creates a new ResourceGroupRule using the payload. 
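+
+As an illustration, a hypothetical rule that groups resources by cluster and namespace could be created as sketched below; the payload fields follow the resourcegrouprule.ResourceGroupRulePayload model described later, and all values are placeholders.
+
+```shell
+curl -sS -X POST "${KARPOR_SERVER}/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "name": "namespace-rule",
+        "description": "Group resources by cluster and namespace",
+        "fields": ["cluster", "namespace"]
+      }'
+```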
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-resource-group-rule-200-schema) | +| [400](#post-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-resource-group-rule-400-schema) | +| [401](#post-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-resource-group-rule-401-schema) | +| [404](#post-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-resource-group-rule-404-schema) | +| [405](#post-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-resource-group-rule-405-schema) | +| [429](#post-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-resource-group-rule-429-schema) | +| [500](#post-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the cluster metadata by name. (*PutRestAPIV1ClusterClusterName*) + +``` +PUT /rest-api/v1/cluster/{clusterName} +``` + +This endpoint updates the display name and description of an existing cluster resource. 
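+
+Updating just the metadata of an existing cluster could look like this sketch (all values are placeholders):
+
+```shell
+curl -sS -X PUT "${KARPOR_SERVER}/rest-api/v1/cluster/example-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "displayName": "Example Cluster (renamed)",
+        "description": "Updated description for the example cluster"
+      }'
+```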
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#put-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#put-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#put-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#put-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#put-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#put-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#put-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the ResourceGroupRule metadata by name. (*PutRestAPIV1ResourceGroupRule*) + +``` +PUT /rest-api/v1/resource-group-rule +``` + +This endpoint updates the display name and description of an existing ResourceGroupRule. 
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------------- | +| [200](#put-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-resource-group-rule-200-schema) | +| [400](#put-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-resource-group-rule-400-schema) | +| [401](#put-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-resource-group-rule-401-schema) | +| [404](#put-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-resource-group-rule-404-schema) | +| [405](#put-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-resource-group-rule-405-schema) | +| [429](#put-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-resource-group-rule-429-schema) | +| [500](#put-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +## Models + +### cluster.ClusterPayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------ | -------- | :------: | ------- | --------------------------------------------------------------- | ------- | +| description | string | `string` | | | ClusterDescription is the description of cluster to be created | | +| displayName | string | `string` | | | ClusterDisplayName is the display name of cluster to be created | | +| kubeconfig | string | `string` | | | ClusterKubeConfig is the kubeconfig of cluster to be created | | + +### cluster.UploadData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------------- | ------- | -------- | :------: | ------- | ----------- | ------- | +| content | string | `string` | | | | | +| fileName | string | `string` | | | | | +| fileSize | integer | `int64` | | | | | +| sanitizedClusterContent | 
string | `string` | | | | | + +### cluster.ValidatePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------- | ------ | -------- | :------: | ------- | ----------- | ------- | +| kubeConfig | string | `string` | | | | | + +### entity.ResourceGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------------- | ------------------- | :------: | ------- | ----------- | ------- | +| annotations | map of string | `map[string]string` | | | | | +| apiVersion | string | `string` | | | | | +| cluster | string | `string` | | | | | +| kind | string | `string` | | | | | +| labels | map of string | `map[string]string` | | | | | +| name | string | `string` | | | | | +| namespace | string | `string` | | | | | + +### insight.ResourceSummary + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| creationTimestamp | string | `string` | | | | | +| resource | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | +| resourceVersion | string | `string` | | | | | +| uid | string | `string` | | | | | + +### insight.ResourceTopology + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| children | []string | `[]string` | | | | | +| parents | []string | `[]string` | | | | | +| resourceGroup | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | + +### insight.ScoreData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------------------------------------------------------------------- | -------------- | ------------------ | :------: | ------- | ----------------------------------------------------------------------- | ------- | +| issuesTotal | integer | `int64` | | | IssuesTotal is the total count of all issues found during the audit. | | +| This count can be used to understand the overall number of problems | | | | | | | +| that need to be addressed. | | | | | | | +| resourceTotal | integer | `int64` | | | ResourceTotal is the count of unique resources audited during the scan. | | +| score | number | `float64` | | | Score represents the calculated score of the audited manifest based on | | +| the number and severity of issues. It provides a quantitative measure | | | | | | | +| of the security posture of the resources in the manifest. | | | | | | | +| severityStatistic | map of integer | `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective | | +| number of occurrences. It allows for a quick overview of the distribution | | | | | | | +| of issues across different severity categories. 
| | | | | | | + +### insight.Statistics + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------- | ------- | :------: | ------- | ----------- | ------- | +| clusterCount | integer | `int64` | | | | | +| resourceCount | integer | `int64` | | | | | +| resourceGroupRuleCount | integer | `int64` | | | | | + +### resourcegrouprule.ResourceGroupRulePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | -------- | ---------- | :------: | ------- | ----------- | ------- | +| description | string | `string` | | | | | +| fields | []string | `[]string` | | | | | +| name | string | `string` | | | | | + +### scanner.AuditData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | ------------------------------------------- | ---------------------- | :------: | ------- | ----------- | ------- | +| bySeverity | map of integer | `map[string]int64` | | | | | +| issueGroups | [][ScannerIssueGroup](#scanner-issue-group) | `[]*ScannerIssueGroup` | | | | | +| issueTotal | integer | `int64` | | | | | +| resourceTotal | integer | `int64` | | | | | + +### scanner.Issue + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------- | ------- | -------- | :------: | ------- | ------------------------------------------------------------------------------------- | ------- | +| message | string | `string` | | | Message provides a detailed human-readable description of the issue. | | +| scanner | string | `string` | | | Scanner is the name of the scanner that discovered the issue. | | +| severity | integer | `int64` | | | Severity indicates how critical the issue is, using the IssueSeverityLevel constants. | | +| title | string | `string` | | | Title is a brief summary of the issue. | | + +### scanner.IssueGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------------- | ----------------------------------------------- | ------------------------ | :------: | ------- | ----------- | ------- | +| issue | [ScannerIssue](#scanner-issue) | `ScannerIssue` | | | | | +| resourceGroups | [][EntityResourceGroup](#entity-resource-group) | `[]*EntityResourceGroup` | | | | | + +### unstructured.Unstructured + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------------------------- | ------------- | :------: | ------- | -------------------------------------------------------------------------------- | ------- | +| object | [interface{}](#interface) | `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or | | +| map[string]interface{} | | | | | | | +| children. | | | | | | | diff --git a/docs/karpor/5-references/3-search-methods.md b/docs/karpor/5-references/3-search-methods.md new file mode 100644 index 00000000..e1e63903 --- /dev/null +++ b/docs/karpor/5-references/3-search-methods.md @@ -0,0 +1,109 @@ +--- +title: Search Methods +--- +Karpor is an open-source project that offers robust capabilities for searching resources across multiple clusters. This document outlines the two main search methods supported by Karpor: DSL (Domain Specific Language) and SQL (Structured Query Language), and explains how to utilize them for resource searches. + +## Keywords + +Karpor facilitates resource searches using two methods: DSL and SQL. 
Both methodologies leverage the following keywords for resource discovery: + +- cluster +- apiVersion +- kind +- namespace +- name +- creationTimestamp +- deletionTimestamp +- ownerReferences +- resourceVersion +- labels.`key` +- annotations.`key` +- content + +## SQL + +Karpor offers a SQL-like approach for querying Kubernetes resources, enabling users to employ SQL syntax for their searches. Below are examples illustrating the use of SQL syntax for various search scenarios: + +**Query resources of the Namespace kind** + +```sql +select * from resources where kind='Namespace' +``` + +**Query resources where the labels contain the key 'key1' with value 'value1'** + +```sql +select * from resources where labels.key1='value1' +``` + +**Query resources where the annotations contain the key 'key1' with value 'value1'** + +```sql +select * from resources where annotations.key1='value1' +``` + +**Query resources that are not of the Pod kind** + +```sql +select * from resources where kind!='Pod' +``` + +**Query resources of the Pod kind within a specific cluster** + +```sql +select * from resources where cluster='demo' and kind='Pod' +``` + +**Query resources of kind within a specified list** + +```sql +select * from resources where kind in ('pod','service') +``` + +**Query resources of kinds not within a specified list** + +```sql +select * from resources where kind not in ('pod','service') +``` + +**Query resources where the namespace starts with appl (where % represents any number of characters)** + +```sql +select * from resources where namespace like 'appl%' +``` + +**Query resources where the namespace contains banan (where \_ represents any single character)** + +```sql +select * from resources where namespace like 'banan_' +``` + +**Query resources where the namespace does not start with appl** + +```sql +select * from resources where namespace not like 'appl%' +``` + +**Query resources where the namespace does not contain banan** + +```sql +select * from resources where namespace notlike 'banan_' +``` + +**Query resources of kind Deployment and created before January 1, 2024, at 18:00:00** + +```sql +select * from resources where kind='Deployment' and creationTimestamp < '2024-01-01T18:00:00Z' +``` + +**Query resources of kind Service and order by creation timestamp in descending order** + +```sql +select * from resources where kind='Service' order by creationTimestamp desc +``` + +**Query resources whose content contains apple** + +```sql +select * from resources where contains(content, 'apple') +``` diff --git a/docs/karpor/5-references/_category_.json b/docs/karpor/5-references/_category_.json new file mode 100644 index 00000000..1fd07096 --- /dev/null +++ b/docs/karpor/5-references/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "References" +} diff --git a/docs/karpor/6-roadmap/README.md b/docs/karpor/6-roadmap/README.md new file mode 100644 index 00000000..b8cf11a3 --- /dev/null +++ b/docs/karpor/6-roadmap/README.md @@ -0,0 +1,18 @@ +--- +title: Roadmap +--- +Karpor is an emerging open-source project, and we are committed to diligently polishing it into a **small and beautiful, vendor-neutral, developer-friendly, community-driven** open-source project! 🚀 Moving forward, we will focus our efforts in the following areas: + +- Refine Karpor's **usability** to lower the barrier of entry and make it sufficiently "user-friendly." +- Strengthen Karpor's **reliability** to ensure it is dependable in production environments. 
+- Deepen the **ecosystem integration** with more community tools to ensure openness. +- Explore **AI + Karpor** to create more possibilities. +- Embrace the open-source community; we love the **open-source spirit**. If you're interested in open source, then start here! +- ...... + +Karpor follows the [Release Process and Cadence Guide](../4-developer-guide/2-conventions/1-release-process.md), but actions may not strictly adhere to the roadmap. We may adjust milestones based on feedback from community meetings and [GitHub issues](https://github.com/KusionStack/karpor/issues), expecting all community members to join the discussions. For final decisions, please refer to the [GitHub milestones](https://github.com/KusionStack/karpor/milestones). + +Below is the detailed roadmap, which we will continue to update ⬇️ + +- **2024 Roadmap**: [https://github.com/KusionStack/karpor/issues/273](https://github.com/KusionStack/karpor/issues/273) + diff --git a/docs/kuperator/concepts/_category_.json b/docs/kuperator/concepts/_category_.json new file mode 100644 index 00000000..1d3167d4 --- /dev/null +++ b/docs/kuperator/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 3 +} diff --git a/docs/kuperator/concepts/podopslifecycle.md b/docs/kuperator/concepts/podopslifecycle.md new file mode 100644 index 00000000..88068133 --- /dev/null +++ b/docs/kuperator/concepts/podopslifecycle.md @@ -0,0 +1,232 @@ +--- +sidebar_position: 2 +--- + +# PodOpsLifecycle + +## Background + +Kubernetes provides a set of default controllers for workload management, such as StatefulSet, Deployment, and DaemonSet, which are responsible for Pod operations. +Meanwhile, application users may also have some services outside the Kubernetes cluster that are closely related to the Pod Lifecycle, including traffic routing, service discovery, or alert monitoring. +However, they face challenges in participating in the operational lifecycle of a Pod, even if they are connected to Kubernetes by developing a controller that watches the Pods. + +PodOpsLifecycle aims to offer Kubernetes administrators and developers finer-grained control over the entire lifecycle of a Pod. +It enables developers to execute necessary actions before, during, and after specific phases of a Pod operation. +For instance, removing the Pod's IP from the traffic route before initiating the Pod operation, performing the actual Pod operations, and adding it back after the Pod operation is completed to achieve a smooth and graceful Pod operation, and prevent any traffic loss. + +## Introduction + +In PodOpsLifecycle, participants are classified into two roles: `operation controllers` and `cooperation controllers`. +- **Operation controllers** are responsible for operating Pods, such as Deployments and StatefulSets from Kubernetes, and CollaSets from Kuperator which intend to scale, update, or recreate Pods. +- **Cooperation controllers** are sensitive with Pod status. They handle resources or configurations around Pods, which may include traffic controller, alert monitoring controller, etc. These controllers typically reconcile Kubernetes resources around Pods with external services, such as sync Pod IPs with the LB provider, or maintaining Pods' metadata with application monitoring system. + +The two types of controllers do not need to be aware of each other. All controllers are organized by PodOpsLifecycle. 
Additionally, KusionStack Kuperator introduces extra phases around the native Kubernetes Pod Lifecycle: ServiceAvailable, Preparing, and Completing. + +![pod-ops-lifecycle](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png) + +- **Completing**: After a Pod is created or updated and becomes ready, Kuperator marks its PodOpsLifecycle as the `Completing` phase. During this phase, the Pod is in a ready condition, prompting cooperation controllers to perform actions such as registering the Pod IP in the traffic route. Once all cooperation controllers complete their tasks, Kuperator sets the PodOpsLifecycle to the `ServiceAvailable` phase. +- **ServiceAvailable**: This phase indicates that the Pod is in a normal state and ready to serve. If everything goes smoothly, the Pod remains in the `ServiceAvailable` phase until the next operation. +- **Preparing**: When an operation controller needs to operate the Pod, it triggers a new PodOpsLifecycle. The Pod then transitions from the `ServiceAvailable` phase to the `Preparing` phase. During this phase, the Pod is initially marked as Unready by setting ReadinessGate to false. All cooperation controllers then begin preparing tasks, such as removing the Pod's IP from the traffic route. After completing these tasks, the Pod enters the `Operating` phase. +- **Operating**: If a Pod enters the `Operating` phase, it is expected to accept any kind of operation without any damage, including recreation, scaling-in, upgrading, etc. Operation controllers are permitted to apply any changes to this Pod. Once all these operations are completed, the Pod advances to the next phase — `Completing`, and the PodOpsLifecycle continues. + +The PodOpsLifecycle detail and the relationship with Kubernetes native Pod Lifecycle is showed by following sequence diagram. + +![pod-ops-lifecycle-sequence-diagram](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png) + +## Developer's Guide + +This section introduces how to develop operation controllers and cooperation controllers to interact with PodOpsLifecycle. +- The operation controller is responsible for a set of Pod operation tasks. KusionStack Kuperator has already provided various types of operation controllers. Users only need to develop a new operation controller if a new kind of Pod operation needs to be added. +- The cooperation controller participates in PodOpsLifecycle before and after operating on a Pod, such as the Traffic controller, alert monitoring controller, and other controllers responsible for maintaining the Pod and application status. Users should develop a new cooperation controller only when there is a new type of service or status around the Pod that needs to be maintained, such as integrating with a new traffic provider. + +### Operation Controller + +The operation controller is responsible for Pod operations. The tasks that an operation controller needs to perform during PodOpsLifecycle include triggering a PodOpsLifecycle, checking whether the Pod has entered the Operating phase, performing Pod operations, and marking Pod operations as finished. These actions interacting with PodOpsLifecycle are provided in the package `kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle/utils.go`. 
+
+A simple operation controller reconcile method would look like this:
+
+```go
+import (
+    "context"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+    "kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle"
+)
+
+var operationAdapter = &OperationOpsLifecycleAdapter{}
+
+type OperationOpsLifecycleAdapter struct {
+}
+
+// GetID indicates the ID of the PodOpsLifecycle
+func (a *OperationOpsLifecycleAdapter) GetID() string {
+    return "new-id"
+}
+
+// GetType indicates the type of this operation controller
+func (a *OperationOpsLifecycleAdapter) GetType() podopslifecycle.OperationType {
+    return "new-type"
+}
+
+// AllowMultiType indicates whether multiple IDs sharing the same Type are allowed
+func (a *OperationOpsLifecycleAdapter) AllowMultiType() bool {
+    return true
+}
+
+// WhenBegin is a hook, which will be executed when a lifecycle begins
+func (a *OperationOpsLifecycleAdapter) WhenBegin(pod client.Object) (bool, error) {
+    return false, nil
+}
+
+// WhenFinish is a hook, which will be executed when a lifecycle finishes
+func (a *OperationOpsLifecycleAdapter) WhenFinish(pod client.Object) (bool, error) {
+    return false, nil
+}
+
+......
+func (r *PodOperationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+    // get the Pod
+    pod := &corev1.Pod{}
+    if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+        if !errors.IsNotFound(err) {
+            return reconcile.Result{}, err
+        }
+        return reconcile.Result{}, nil
+    }
+
+    // check if the Pod needs operation
+    if !r.needOperation(pod) {
+        return reconcile.Result{}, nil
+    }
+
+    // if PodOpsLifecycle has not been triggered, trigger it
+    if !podopslifecycle.IsDuringOps(operationAdapter, pod) {
+        if updated, err := podopslifecycle.Begin(r, operationAdapter, pod); err != nil {
+            return reconcile.Result{}, err
+        } else if updated {
+            return reconcile.Result{}, nil
+        }
+    }
+
+    // wait until the Pod enters the Operating phase
+    if _, allowed := podopslifecycle.AllowOps(operationAdapter, 0, pod); !allowed {
+        return reconcile.Result{}, nil
+    }
+
+    // do operation works
+    if completed := r.doPodOperation(pod); !completed {
+        return reconcile.Result{}, nil
+    }
+
+    // after the operation works are completed, finish the Operating phase to continue PodOpsLifecycle
+    if _, err := podopslifecycle.Finish(r, operationAdapter, pod); err != nil {
+        return reconcile.Result{}, err
+    }
+
+    return reconcile.Result{}, nil
+}
+```
+
+### Pod Cooperation Controller
+
+There are two ways to develop a cooperation controller.
+One way is to develop a controller using the controller runtime and adhering to some conventions of PodOpsLifecycle and Kubernetes.
+Another way is to make use of the [ResourceConsist](https://github.com/KusionStack/resourceconsist) framework provided by KusionStack, which can be referenced from its [documentation](https://www.kusionstack.io/docs/kuperator/manuals/resourceconsist).
+
+The following outlines the first approach.
+
+```go
+import (
+    "context"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    k8spod "k8s.io/kubernetes/pkg/api/v1/pod"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+    appsv1alpha1 "kusionstack.io/kuperator/apis/apps/v1alpha1"
+)
+
+const (
+    // Finalizer needs to have prefix: `prot.podopslifecycle.kusionstack.io`.
+    // KusionStack Kuperator keeps this prefix backward compatible,
+    // so it can be hard-coded without coupling to KusionStack Kuperator.
+    finalizerPrefix = appsv1alpha1.PodOperationProtectionFinalizerPrefix
+
+    protectionFinalizer = finalizerPrefix + "/" + "unique-id"
+)
+
+......
+func (r *PodResourceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+    // get the Pod
+    pod := &corev1.Pod{}
+    if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+        if !errors.IsNotFound(err) {
+            return reconcile.Result{}, err
+        }
+        return reconcile.Result{}, nil
+    }
+
+    if k8spod.IsPodReady(pod) {
+        // do resource reconcile like add the Pod IP to the traffic route
+        r.trafficOn(pod.Status.PodIP)
+        // It is important to add a unique finalizer on this Pod
+        return reconcile.Result{}, r.addFinalizer(ctx, pod, protectionFinalizer)
+    }
+
+    // the Pod is not ready: do resource reconcile like remove the Pod IP from the traffic route
+    r.trafficOff(pod.Status.PodIP)
+    // It is important to remove the unique finalizer from this Pod
+    return reconcile.Result{}, r.removeFinalizer(ctx, pod, protectionFinalizer)
+}
+
+func (r *PodResourceReconciler) addFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+    if controllerutil.ContainsFinalizer(pod, finalizer) {
+        return nil
+    }
+
+    controllerutil.AddFinalizer(pod, finalizer)
+    return r.Update(ctx, pod)
+}
+
+func (r *PodResourceReconciler) removeFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+    if !controllerutil.ContainsFinalizer(pod, finalizer) {
+        return nil
+    }
+
+    controllerutil.RemoveFinalizer(pod, finalizer)
+    return r.Update(ctx, pod)
+}
+```
+
+## Key Features
+
+### Concurrency Support
+
+PodOpsLifecycle in KusionStack Kuperator supports concurrency.
+This means PodOpsLifecycle is able to organize and track multiple controllers operating on the same Pod at the same time.
+For example, while one controller is updating a Pod, other controllers are allowed to perform other operations on it at the same time, such as deleting, restarting, or recreating it,
+although the combined result may not always be meaningful.
+
+### General Workload Support
+
+PodOpsLifecycle offers seamless integration with various workload types, including Deployment and StatefulSet.
+To enable this functionality, ensure the feature gate for `GraceDeleteWebhook` is enabled when starting the KusionStack Kuperator controller:
+
+```shell
+# Enable the GraceDeleteWebhook feature when starting the controller with this argument
+$ /manager --feature-gates=GraceDeleteWebhook=true
+```
+
+Once enabled, any Pod labeled with `kusionstack.io/control=true` under a general workload, such as Deployment, becomes manageable by PodOpsLifecycle.
+This feature gives such workloads a way to work more closely with Pod cooperation controllers.
+
+> Due to the Kubernetes webhook mechanism, the following error will be returned when workloads or users delete a pod. This error is intentional and serves to indicate that the pod deletion process has started and is being managed by PodOpsLifecycle.
+> ```shell +> $ kubectl -n default delete pod collaset-sample-74fsv +> Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle +> ``` \ No newline at end of file diff --git a/docs/_category_.json b/docs/kuperator/introduction/_category_.json similarity index 100% rename from docs/_category_.json rename to docs/kuperator/introduction/_category_.json diff --git a/docs/kuperator/introduction/introduction.md b/docs/kuperator/introduction/introduction.md new file mode 100644 index 00000000..5adf228c --- /dev/null +++ b/docs/kuperator/introduction/introduction.md @@ -0,0 +1,49 @@ +# What is KusionStack Kuperator? + +KusionStack Kuperator consists of workloads and operators built on Kubernetes Custom Resource Definitions, +with a primary aim of bridging the gap between platform development and Kubernetes. + +By keeping more operation works finished in Kubernetes layer, +KusionStack Kuperator reduces complexity when interacting with Kubernetes +and enhances convenience for platform developers. + +## Key features + +KusionStack Kuperator currently provides the following features, +streamlining application operations when developing platforms based on Kubernetes: + +### Fine-grained operation + +KusionStack Kuperator introduces PodOpsLifecycle to extend native Pod lifecycle with additional phases such as PreCheck, Preparing, etc. +All operators within KusionStack Kuperator will respect PodOpsLifecycle, +so that PodOpsLifecycle is able to orchestrate all of these operators to operate each Pod coordinately. + +### Advanced workloads + +KusionStack Kuperator offers several workloads to ensure it is convenient and effective to delivery and operate application resources. + +Recently, Kuperator provides the workload CollaSet. +Besides the basic ability of scaling and updating Pods like Deployment and StatefulSet of Kubernetes, +CollaSet also provides a range of scale and update strategies, +like in-place update with container image and pod revision consistency. + +### Streamlined Pod Operation + +KusionStack Kuperator introduces resource consist framework that offers a graceful way +to integrate resource management around Pods, including traffic control, into the PodOpsLifecycle. +This simplifies the works for platform developers dealing with Pod operation details. +KusionStack also integrates some resources by default, such as Aliyun SLB. + +### Risk management + +Building upon the PodOpsLifecycle, KusionStack Kuperator introduces the workload named PodTransitionRule +which will keep risks of pod operation under control. +By providing a MaxUnavailable rule similar to Kubernetes' PodDisruptionBudget (PDB), +it ensures there are always enough Pods available for service. +Furthermore, it allows for custom rules through extension via webhooks and label hooks. + +## Future works + +KusionStack Kuperator project is currently in its early stages. +Our goal is to simplify platform development. We will continue building in areas such as application operations, +observability, and insight. We hope the Kuperator will make it easier for you to build platforms. 
\ No newline at end of file diff --git a/docs/kuperator/manuals/_category_.json b/docs/kuperator/manuals/_category_.json new file mode 100644 index 00000000..795f138a --- /dev/null +++ b/docs/kuperator/manuals/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Manuals", + "position": 4 +} diff --git a/docs/kuperator/manuals/collaset.md b/docs/kuperator/manuals/collaset.md new file mode 100644 index 00000000..b5ebcfaf --- /dev/null +++ b/docs/kuperator/manuals/collaset.md @@ -0,0 +1,942 @@ +--- +sidebar_position: 1 +--- + +# CollaSet +CollaSet is responsible for managing a set of Pods. Similar to Kubernetes Deployment and StatefulSet, it also supports scaling and updating Pods. Additionally, CollaSet offers advanced features to provide users with more granular control over managing Pods. + +A basic CollaSet configuration is represented in the following YAML format: + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` +Let's explore the features of CollaSet. + +## Basic Features +### Scaling Pods +CollaSet utilizes the field spec.replicas to indicate the number of Pods under management. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 3 # indicate the number of Pods to manage + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +... +``` +Pods can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-85q7g 1/1 Running 0 57s +collaset-sample-vx5ws 1/1 Running 0 57s +collaset-sample-hr7pv 1/1 Running 0 57s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +By default, CollaSet always creates new Pods using the latest template specified in `spec.template`. CollaSet establishes ownership over a set of Pods through the label selector defined in `spec.selector`. Thus, it's important to ensure that the labels provided in `spec.selector` match those in `spec.template.metadata.labels`. + +CollaSet status provides general information about this CollaSet and all Pods under it. + +``` shell +$ kubectl -n default get cls collaset-sample -o yaml +...... +status: + availableReplicas: 3 + collisionCount: 0 + conditions: + - lastTransitionTime: "2023-09-01T03:56:09Z" + reason: Updated + status: "True" + type: Update + currentRevision: collaset-sample-6d7b7c58f + observedGeneration: 1 + operatingReplicas: 0 + readyReplicas: 3 + replicas: 3 + scheduledReplicas: 3 + updatedAvailableReplicas: 3 + updatedReadyReplicas: 3 + updatedReplicas: 3 + updatedRevision: collaset-sample-6d7b7c58f +``` + +Some fields in CollaSet status are explained here: + +`updatedRevision` indicates the latest revision that CollaSet uses to create or update Pods. + +`currentRevision` indicates the last updated revision. It will be set to updatedRevision after all Pods are updated, and their PodReady conditions become True. + +`replicas` indicates the count of Pods under this CollaSet. 
+ +`scheduledReplicas` indicates the count of Pods under this CollaSet that successfully got scheduled. + +`availableReplicas` indicates the count of Pods under this CollaSet that have all expected finalizers attached. + +`updatedReplicas` indicates the count of Pods under this CollaSet that have the updated revision. + +`updatedReadyReplicas` indicates the count of Pods under this CollaSet that are counted in `updatedReplicas` and have their PodReady conditions set to True. + +`updatedAvailableReplicas` indicates the count of Pods under this CollaSet that is counted in `updatedReadyReplicas` and have all expected finalizers attached. + +### Updating Pods +CollaSet generates Pods according to the pod template described in `spec.template`. This template can be updated to signal CollaSet to update each owned Pod: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: +...... +spec: +...... + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 +...... +``` + +CollaSet immediately updates all Pods it owns with the new Pod template by default. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +The update progress can be controlled using partition. + +#### Partition +Similar to StatefulSet, `partition` is used to control the upgrade progress. + +By default, if not indicated, all Pods will be updated when spec.template changes. The `partition` can be adjusted from 0 to `spec.replicas` to specify how many Pods CollaSet should update. **Unlike StatefulSet, the partition in CollaSet represents the number of Pods to update.** + +Let's update the image back to nginx:1.25.2: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.25.2 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # use partition to control upgrade progress +``` + +In this case, CollaSet only updates 1 Pod to the updated revision. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.25.2 # only 1 Pod updated + - image: nginx:1.24.0 +``` + +#### Update by Label +By configuring the `byLabel` rolling update policy, users can precisely specify which Pods they want to update by using labels. + +If you go back to the sample in the [section Partition](#Partition) and change `byPartition` to `byLabel` like the following: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + rollingUpdate: +- byPartition: +- partition: 1 ++ byLabel: {} +``` + +Subsequently, each Pod will only be updated if it's marked with the label `collaset.kusionstack.io/update-included`. + +## Advanced Features +### Pod Instance ID +Each Pod created by CollaSet has a unique ID held by the label `collaset.kusionstack.io/instance-id`, which can be used to identify each individual Pod. + +``` yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + collaset.kusionstack.io/instance-id: "0" # Pod instance ID +... +``` + +CollaSet provides a context to specify an ID pool, which defaults to the same name as the CollaSet and is immutable. + +``` yaml +... 
+spec: + scaleStrategy: + context: +``` + +The same ID pool name can be indicated for multiple CollaSets, allowing them to share a single ID pool. Consequently, each Pod created by these CollaSets will be assigned a unique ID. + +For example, these are two CollaSets with the same context: + +``` shell +$ cat ~/sample.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-a +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +--- + +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-b +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` + +Then create these CollaSets with the sample file: + +``` shell +$ kubectl -n default apply -f ~/sample.yaml +collaset.apps.kusionstack.io/collaset-sample-a created +collaset.apps.kusionstack.io/collaset-sample-b created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-a-g4sjj 1/1 Running 0 42s +collaset-sample-a-ph9vc 1/1 Running 0 42s +collaset-sample-b-fqkq4 1/1 Running 0 42s +collaset-sample-b-lqg8f 1/1 Running 0 42s + +$ kubectl -n default get pod -o yaml | grep collaset.kusionstack.io/instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "3" + collaset.kusionstack.io/instance-id: "2" +``` + +Now, the 4 Pods created by these 2 CollaSets will have a unique instance ID. + +### Revision Consistency +Pods within a CollaSet can utilize more than two different Pod templates simultaneously, including both the current and updated revisions. This can result from partial updates. To ensure the stability of Pod revisions over time, CollaSet records this information. When a Pod is deleted, CollaSet recreates it using its previous revision. + +It can be reproduced by following steps: + +1. Provision a new CollaSet with replicas 3. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 4s +collaset-sample-glgnb 1/1 Running 0 4s +collaset-sample-qs46r 1/1 Running 0 4s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +2. Update the image of PodTemplate of the CollaSet to image nginx:1.24.0 and set the partition to 2. Then there will be 2 Pods with image nginx:1.24.0 and 1 Pod with image nginx:1.25.2. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 2 # update 2 Pods + +# Wait until these 2 Pods are updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +3. 
Update the image of PodTemplate of the CollaSet to image nginx:1.23.4 and set the partition to 1. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.23.4 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # update 1 Pod + +# Wait until the Pod is updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # Pod collaset-sample-qs46r + - image: nginx:1.23.4 +``` + +Now, there are 3 Pods, each of which has an individual image. If we then delete the Pod with the image nginx:1.24.0, the new Pod replacing it will be created with the same image nginx:1.24.0 in order for the Pod to inherit the revision. + +``` shell +$ kubectl delete -n default delete pod collaset-sample-qs46r +pod "collaset-sample-qs46r" deleted + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 3h +collaset-sample-ht9x6 1/1 Running 0 2m27s # Pod recreated +collaset-sample-qs46r 1/1 Running 1 (3h ago) 3h + +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # image has not been changed + - image: nginx:1.23.4 +``` + +### In-Place Update Pod +In addition to the `Recreate` update policy, which is identical to Deployment and StatefulSet, CollaSet offers the `InPlaceIfPossible` update policy. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # Options: InPlaceIfPossible, Recreate, Replace +``` + +`InPlaceIfPossible` is the default value, which instructs CollaSets to try to update Pods in place when only container images, labels, and annotations have changed. If some other fields have changed too, the policy will back off to the `Recreate` policy. + +`Recreate` indicates CollaSets always delete the old Pod and create a new one with an updated revision. + +If update pod template with `InPlaceIfPossible` policy as following example, the Pod will not be recreated. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # use InPlaceIfPossible policy + +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5wvlh 1/1 Running 1 (6s ago) 2m10s +collaset-sample-ldvrg 1/1 Running 1 (6s ago) 2m10s +collaset-sample-pbz75 1/1 Running 1 (6s ago) 2m10s +``` + +### Replace Update Pod + +CollaSet provides the `Replace` policy for certain applications that are sensitive to the available number of Pods. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + podUpgradePolicy: Replace # Options: InPlaceIfPossible, Recreate, Replace +``` + +The `Replace` policy indicates that CollaSet should update a Pod by creating a new one to replace it. 
+Unlike the `Recreate` policy, which deletes the old Pod before creating a new updated one, or the `InPlaceIfPossible` policy, which updates the current Pod in place, +the `Replace` policy first creates a new Pod with the updated revision. It then deletes the old Pod once the new one becomes available for service. + +```shell +# Before updating CollaSet +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-dwkls 1/1 Running 0 6m55s + +# After updating CollaSet, the updated Pod is created first +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-dwkls 1/1 Running 0 6m55s +collaset-sample-rcmbv 0/1 ContainerCreating 0 0s + +# Once the created Pod is available for service, the old Pod will be deleted +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-rcmbv 1/1 Running 0 1s +collaset-sample-dwkls 1/1 Terminating 0 7m12s +``` + +The two Pods will have a pair of labels to identify their relationship. The new Pod will have the label `collaset.kusionstack.io/replace-pair-origin-name` to indicate the name of the old Pod, and the old Pod will have the label `collaset.kusionstack.io/replace-pair-new-id` to indicate the instance ID of the new Pod. + +Additionally, the new Pod and old Pod will each begin their own PodOpsLifecycles, which are independent of each other. + +### Recreate And Replace Specified Pod + +In practice, users often need to recreate or replace specified Pods under a CollaSet. + +To delete a Pod, users can simply call the Kubernetes API, like executing `kubectl delete pod `. +However, this will bypass the [PodOpsLifecycle](https://www.kusionstack.io/docs/kuperator/concepts/podopslifecycle) Mechanism. +We provide following two options: + +1. Enable the feature `GraceDeleteWebhook` so that it is possible to delete Pods through `PodOpsLifecycle`. +```shell +# Enable the GraceDeleteWebhook feature when starting the controller with this argument +$ /manager --feature-gates=GraceDeleteWebhook=true +``` +```shell +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-vqccr 1/1 Running 0 21s + +# Delete the pod directly. A message will respond indicating that the Pod deletion is handled by PodOpsLifecycle +kubectl -n default delete pod collaset-sample-vqccr +Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle + +# The old Pod is deleted, and a new Pod will be created +$ kubectl -n default get pod -w +collaset-sample-vqccr 1/1 Running 0 71s +collaset-sample-vqccr 1/1 Terminating 0 71s +...... +collaset-sample-nbl6t 0/1 Pending 0 0s +collaset-sample-nbl6t 0/1 ContainerCreating 0 0s +...... +collaset-sample-nbl6t 1/1 Running 0 0s +``` +2. Label the Pod with `podopslifecycle.kusionstack.io/to-delete`, so that CollaSet will delete the Pod through PodOpsLifecycle. + +```shell +# Label Pod +$ kubectl -n default label pod collaset-sample-nbl6t podopslifecycle.kusionstack.io/to-delete=true + +# The old Pod is deleted, and a new Pod will be recreated +$ kubectl -n default get pod -w +collaset-sample-nbl6t 1/1 Running 0 5m28s +collaset-sample-nbl6t 1/1 Terminating 0 5m28s +...... +collaset-sample-w6x69 0/1 Pending 0 0s +...... +collaset-sample-w6x69 0/1 ContainerCreating 0 0s +...... 
+collaset-sample-w6x69 1/1 Running 0 2s +``` + +Recreating a Pod will delete the old Pod first and then create a new one. This will affect the available Pod count. +To avoid this, CollaSet provides a feature to replace Pods by labeling them with `podopslifecycle.kusionstack.io/to-replace`. + +```shell +# Replace Pod by label +$ kubectl -n echo label pod collaset-sample-w6x69 podopslifecycle.kusionstack.io/to-replace=true + +# The old Pod is deleted, and a new Pod will be created +$ kubectl -n default get pod -w +collaset-sample-w6x69 1/1 Running 0 5m29s +collaset-sample-74fsv 0/1 Pending 0 0s +collaset-sample-74fsv 0/1 ContainerCreating 0 0s +...... +collaset-sample-74fsv 1/1 Running 0 2s +...... +collaset-sample-w6x69 0/1 Terminating 0 5m33s +``` + + +### Supprting PVCs +CollaSet introduces support for PVCs, allowing user to declare `VolumeClaimTemplates` to create PVCs for each Pod. +Furthermore, in response to common issues with PVCs management, such as high modification costs and difficult control, CollaSet extends its functionality with the following advantages vs. StatefulSet: + +1. Support update, add and delete on `volumeClaimTemplates`. +2. Provide control over PVC lifecycle. + +#### Provision PVCs +The `collaset-pvc.yaml` file declares a CollaSet with `VolumeClaimTemplates` to provision a PVC with `1Gi` storage for each Pod. +These PVCs are then mounted on the container at the path `/path/mount/www`. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi +``` + +Pods and PVCs can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-pw5lg 1/1 Running 0 4s +foo-5n6ts 1/1 Running 0 4s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-h5zv7 Bound pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6 1Gi RWO standard 7s +foo-www-lswp2 Bound pvc-9564b44b-9c99-467b-abee-4285183ff9c3 1Gi RWO standard 7s +``` + +Each Pod and its related PVC have the same value of label `collaset.kusionstack.io/instance-id`. + +``` shell +$ kubectl -n default get pod -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "0" + +$ kubectl -n default get pvc -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "0" +``` + +#### Update PVCs +To save the operating costs of PVCs, i.e. expand storage capacity, CollaSet supports update, add and delete on `volumeClaimTemplates`. + +To achieve this, for each PVC, CollaSet calculates a hash value based on its template, and attatch it to label `collaset.kusionstack.io/pvc-template-hash`. +Once users modify the templates, CollaSet recognizes, caculates a new hash value and attach it on new PVCs to replace old ones. + +Let's give it a try, update the storage of PVC template from `1Gi` to `2Gi`. +``` shell +$ kubectl -n default edit cls foo + ...... 
+ volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeModes: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: +- storage: 1Gi ++ storage: 2Gi # update pvc template to expand storage +...... +``` + +There are 2 new PVCs with `2Gi` storage created with different hash values. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-pw5lg 1/1 Terminating 0 7s +foo-5n6ts 1/1 Terminating 0 7s +foo-9nhz4 0/1 Pending 0 1s +foo-xb2gd 0/1 Pending 0 1s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-h5zv7 Terminating pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6 1Gi RWO standard 11s +foo-www-lswp2 Terminating pvc-9564b44b-9c99-467b-abee-4285183ff9c3 1Gi RWO standard 11s +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 5s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 5s + +$ kubectl -n default get pvc -o yaml | grep pvc-template-hash + collaset.kusionstack.io/pvc-template-hash: 594d8857f9 # hash value of old pvc + collaset.kusionstack.io/pvc-template-hash: 594d8857f9 + collaset.kusionstack.io/pvc-template-hash: d78c5ff6b # hash value of new pvc + collaset.kusionstack.io/pvc-template-hash: d78c5ff6b +``` + +For old Pvcs, users can retain them by configuring `whenScaled` policy to `Retain` . +Then old PVCs can be re-mount on its related Pod after rolling back. +Otherwise, old PVCs can be deleted by default policy `Delete`. + + +#### Add PVCs +Add a PVC template `yyy`, which is mounted on the container at the path `/path/mount/yyy`. + +``` shell +$ kubectl -n default edit cls foo +...... + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www ++ - mountPath: /path/mount/yyy # path to mount PVC ++ name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi ++ - metadata: # added pvc template ++ name: yyy ++ spec: ++ storageClassName: standard ++ volumeMode: Filesystem ++ accessModes: [ "ReadWriteOnce" ] ++ resources: ++ requests: ++ storage: 2Gi +``` + +Now, each pod has two PVCs, which include a new PVCs claimed by template `yyy` and one old PVC claimed by template `www`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-8wwsz 0/1 Pending 0 1s +foo-9nhz4 1/1 Terminating 0 23s +foo-hd2cv 0/1 Pending 0 1s +foo-xb2gd 1/1 Terminating 0 23s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 25s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 25s +foo-yyy-c68nh Bound pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 3s # new pvc +foo-yyy-vpwss Bound pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 3s # new pvc +``` + +#### Delete PVCs +Delete the PVC template `yyy` on CollaSet. + +``` shell +$ kubectl -n default edit cls foo +...... 
+ spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www +- - mountPath: /path/mount/yyy # path to mount PVC +- name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi +- - metadata: # delete pvc template +- name: yyy +- spec: +- storageClassName: standard +- volumeMode: Filesystem +- accessModes: [ "ReadWriteOnce" ] +- resources: +- requests: +- storage: 2Gi +``` + +Now, PVCs claimed by template `yyy` are deleted and the origin PVCs claimed by template `www` are retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-6qcpc 1/1 Running 0 2s +foo-z2jqv 1/1 Running 0 2s +foo-8wwsz 1/1 Terminating 0 38s +foo-hd2cv 1/1 Terminating 0 38s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 61s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 61s +foo-yyy-c68nh Terminating pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 39s +foo-yyy-vpwss Terminating pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 39s +``` + +#### PVC Retention Policy +CollaSet provides control over PVC lifecycle by configuring `spec.persistentVolumeClaimRetentionPolicy`. +Users can retain or delete PVCs after its related Pod is scaled down or CollaSet is deleted, respectively. +This feature is also supported by [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) since v1.27. +Basic rule is detailed as follows: +- `whenScale` : decides to delete or retain PVCs after Pod is scaled down. +- `whenDeleted`: decides to delete or retain PVCs after CollaSet is deleted. + +For each policy users can set the value to either Delete (by default) or Retain. +Note that for StatefulSet, the default policy is Retain. + +#### whenScaled +Apply `collaset-pvc.yaml` and edit foo to scale replicas to 1. +``` shell +$ kubectl apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl edit cls foo + ...... + spec: +- replicas: 2 ++ replicas: 1 # scale in 1 pod + selector: + matchLabels: + app: foo + ...... +``` +As the `whenScaled` is not configured, thus its value is `Delete` by default. +Consequently, PVC `foo-www-wzwbq` is deleted as its related Pod `foo-tkc5m` is scaling down. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-tkc5m 0/1 Terminating 0 27s # related pvc is terminating +foo-vwtcm 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-wzwbq Terminating pvc-b92c28c6-59ad-4976-810c-8d538c4a22c6 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 29s +``` + +Set `Retain` to `whenScaled`, and scale replicas to 0. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 1 ++ replicas: 0 # scale in 1 pod + selector: + matchLabels: + app: foo ++ scaleStrategy: ++ persistentVolumeClaimRetentionPolicy: ++ whenScaled: Retain # retain the pvc after pod is scaled down + ...... 
+``` + +Pod `foo-vwtcm` is terminating, while its related PVC `foo-www-r4vlh` is retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-vwtcm -n default 1/1 Terminating 0 62s # related pvc is retained + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 63s +``` + +To validate the retention policy, try ro scale replicas to 2, and the remaining PVC should be mounted again. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 0 ++ replicas: 2 # scale out 2 pods + ...... +``` + +We can see that PVC `foo-www-r4vlh` is retained by Pod `foo-px487` as they have the same `instance-id`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-ld5xc 1/1 Running 0 27s +foo-px487 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 2m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` + +#### whenDelete +Edit `foo` to configure `Retain` policy for `whenDelete`, and then delete this CollaSet. +``` shell +$ kubectl -n default edit cls foo + ...... + scaleStrategy: + persistentVolumeClaimRetentionPolicy: + whenScaled: Retain ++ whenDelete: Retain # retain the pvc after collaset is deleted + ...... +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default delete cls foo +collaset.apps.kusionstack.io "foo" deleted +``` + +Now, try to recreate `foo` with 2 replicas, and the result shows both PVCs are retained. +``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-qhh8t 1/1 Running 0 2s +foo-ss255 1/1 Running 0 2s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 4m29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 6m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" # pvc foo-www-d48gx is retained + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` diff --git a/docs/kuperator/manuals/poddecoration.md b/docs/kuperator/manuals/poddecoration.md new file mode 100644 index 00000000..a4c87532 --- /dev/null +++ b/docs/kuperator/manuals/poddecoration.md @@ -0,0 +1,352 @@ +--- +sidebar_position: 4 +--- + +# PodDecoration +PodDecoration works in conjunction with CollaSet to selectively inject specific configurations to Pods that meet certain criteria. + +PodDecoration not only allows injecting sidecar containers to Pods but also enables modifying existing container configurations, metadata, and scheduling parameters etc. 
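+
+As a first impression of the API shape, here is a minimal sketch of a PodDecoration that injects one sidecar container into Pods labeled `app: foo`. The field names are taken from the complete walk-through in the Example section below; treat this as an illustration rather than a full specification.
+
+```yaml
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: PodDecoration
+metadata:
+  name: minimal-pd
+spec:
+  selector:                                # Pods this PodDecoration takes effect on
+    matchLabels:
+      app: foo
+  template:
+    containers:                            # sidecar containers to inject
+      - injectPolicy: AfterPrimaryContainer
+        name: sidecar-a
+        image: ubuntu:22.04
+        command: ["sleep", "2h"]
+```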
+The PodDecoration controller does not control the upgrade of Pods. The actual upgrade process is fully controlled by the CollaSet controller. This means that the injection upgrade of PodDecoration can also be performed `InPlaceIfPossible`. + +About [CollaSet](collaset.md). +# Example + +## Create CollaSet + +```yaml +# collaset.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: foo +``` +Use `collaset.yaml` to create three pods under CollaSet `foo` management. +```shell +$ kubectl apply -f collaset.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl get cls +NAME DESIRED CURRENT AVAILABLE UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +foo 3 3 3 3 3 3 foo-7bdb974bc7 foo-7bdb974bc7 7s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2wnnf 1/1 Running 0 41s +foo-hqpx7 1/1 Running 0 41s +foo-mqt48 1/1 Running 0 41s +``` +## Create PodDecoration + +The following `poddecoration.yaml` file describes a PodDecoration, which selects the pod under CollaSet `foo` and injects the content in `template` into the pod with `instance-id=0`. + +```yaml +# poddecoration.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: sample-pd +spec: + selector: # selected pod range in which PodDecoration takes effect + matchLabels: + app: foo + updateStrategy: + rollingUpdate: + selector: # select pod to upgrade in effect range + matchLabels: + collaset.kusionstack.io/instance-id: "0" + template: + metadata: + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v1" + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + command: ["sleep", "2h"] + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` + +Create PodDecoration `sample-pd` to upgrade selected pod +```shell +$ kubectl apply -f poddecoration.yaml +poddecoration.apps.kusionstack.io/sample-pd created +``` +The status of PodDecoration is updated, and one pod is injected with sidecar through recreate. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 1 1 1 sample-pd-9465f4c84 20s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 15s +foo-2wnnf 1/1 Running 0 2m +foo-hqpx7 1/1 Running 0 2m + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-2wnnf + escaped: true + - name: foo-hqpx7 + escaped: true + matchedPods: 3 + injectedPods: 1 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + updatedRevision: sample-pd-9465f4c84 +``` + +## Update PodDecoration + +### Rolling update v1 + +Edit `sample-pd` to expand the upgrade scope. +```shell +$ kubectl edit pd sample-pd +``` + +```yaml +# poddecoration.yaml +# Edit updateStrategy to select instance-id in [0, 1, 2] +... +spec: + ... + updateStrategy: + rollingUpdate: + selector: + matchExpressions: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + - "1" + - "2" + template: + ... +``` + +All pods updated. 
+```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 3 3 sample-pd-9465f4c84 sample-pd-9465f4c84 3m + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 3m +foo-lftw8 2/2 Running 0 8s +foo-n57rr 2/2 Running 0 8s + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + currentRevision: sample-pd-9465f4c84 + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 3 + updatedReadyPods: 3 + updatedAvailablePods: 3 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-9465f4c84 +``` +### Rolling update v1 -> v2 + + +Update `sample-pd`'s sidecar container image and `updateStrategy`. +```shell +$ kubectl edit pd sample-pd +``` +```yaml +# poddecoration.yaml +# Update sidecar-a's image with ubuntu:22.10 +# Edit updateStrategy to select instance-id in [0] +... +spec: + ... + updateStrategy: + rollingUpdate: + selector: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + template: + ... + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.10 + ... +``` +Pod `foo-2gnnl` in-place upgrade sidecar container image. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 1 1 sample-pd-9465f4c84 sample-pd-8697d4bf8c 6min + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 1 (12s ago) 6m +foo-lftw8 2/2 Running 0 3min +foo-n57rr 2/2 Running 0 3min + +$ kubectl get pod foo-2gnnl -o yaml | grep "image: ubuntu" + image: ubuntu:22.10 + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-8697d4bf8c + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-8697d4bf8c +``` + + +# Features + +## Injection + +### Metadata +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +spec: + template: + metadata: + - patchPolicy: MergePatchJson + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"sample-pd","version":"v2"}]' + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-name: sample-pd +``` +`patchPolicy` is the injected policy, as follows: +- `Retain`: The original value of annotations and labels will be retained. +- `Overwrite`: The value of annotations and labels corresponding to the existing key will be overwritten. +- `MergePatchJson`: It only takes effect for annotation. If the key does not exist, the value will be written directly. Otherwise, the json value will be merged. 
+
+For example:
+```yaml
+# Old pod metadata
+metadata:
+  labels:
+    custom.io/sidecar-version: "v1"
+  annotations:
+    cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}]'
+
+# After metadata injected
+metadata:
+  labels:
+    custom.io/sidecar-version: "v2"
+  annotations:
+    cafe.sofastack.io/decoration-type: sample-pd
+    cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}, {"name":"sample-pd","version":"v2"}]'
+```
+### Primary Container
+
+```yaml
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: PodDecoration
+spec:
+  template:
+    primaryContainers:
+    - targetPolicy: ByName
+      name: foo
+      image: foo:v2
+      env:
+      - name: APP_NAME
+        value: foo
+      volumeMounts:
+      - name: sample-volume
+        mountPath: /vol/sample
+      volumes:
+      - name: sample-volume
+        emptyDir: {}
+```
+Injection into the primary containers only supports limited fields: `image`, `env` and `volumeMounts`.
+
+`targetPolicy` indicates which existing containers this configuration should be injected into, as follows:
+- `ByName`: Only inject containers matching `name`.
+- `All`: Inject into all primary containers.
+- `First`: Inject into the first primary container.
+- `Last`: Inject into the last primary container.
+
+### Sidecar Container
+
+```yaml
+spec:
+  template:
+    containers:
+    - injectPolicy: AfterPrimaryContainer # Container injection policy, AfterPrimaryContainer or BeforePrimaryContainer
+      name: sidecar-a
+      image: ubuntu:22.04
+      ...
+```
+Injects a new sidecar container. Optionally, it can be placed before or after the primary container.
+### InitContainer
+
+```yaml
+spec:
+  template:
+    initContainers:
+    - name: init
+      image: custom-init-image:v1
+      ...
+```
+
+## Upgrade strategy
+Coming soon...
\ No newline at end of file
diff --git a/docs/kuperator/manuals/podtransitionrule.md b/docs/kuperator/manuals/podtransitionrule.md
new file mode 100644
index 00000000..de0f7d2f
--- /dev/null
+++ b/docs/kuperator/manuals/podtransitionrule.md
@@ -0,0 +1,220 @@
+---
+sidebar_position: 3
+---
+
+# PodTransitionRule
+In a normal Pod lifecycle, some phases are defined. For example, K8s Pods follow a defined lifecycle, starting in the `Pending` phase, moving through `Running` if at least one of its primary containers starts `OK`, and then through either the `Succeeded` or `Failed` phases depending on whether any container in the Pod terminated in failure.
+
+These phase definitions can cover basic Pod change scenarios, but they are ambiguous.
+In practice, before a Pod is upgraded or becomes ready, it is necessary to have some check mechanisms in place to ensure the safety of Pod changes. Fortunately, [PodOpsLifecycle](../concepts/podopslifecycle.md) extends and supports some check stages: `PreCheck` before a Pod is upgraded and `PostCheck` before it becomes ready.
+
+To ensure a more fine-grained and controlled change process for Pods, we introduce custom rules or perform additional tasks as prerequisites for state transitions before the desired state of a Pod is achieved. This is similar to Pod `readinessGates`, where certain conditions must be met for a Pod to be considered ready. For example, we consider a Pod ready for the `PostCheck` phase only if it has specific labels. For this purpose, we introduce the `PodTransitionRule` as a prerequisite for the state transition of a Pod.
+
+## Rule Definition
+
+You can use `PodTransitionRule` to define a set of transition rules for your workload pods.
+Each rule will be executed at the corresponding stage, and the transition will be blocked if the conditions are not met.
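+
+In its simplest form, a PodTransitionRule carries a single rule plus a Pod selector. The minimal sketch below illustrates this; the field names are taken from the fuller example that follows.
+
+```yaml
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: PodTransitionRule
+metadata:
+  name: minimal-sample
+spec:
+  rules:
+  - name: maxUnavailable
+    availablePolicy:
+      maxUnavailableValue: 50%   # at most half of the selected Pods may be unavailable at once
+  selector:                      # select pods in effect
+    matchLabels:
+      app: foo
+```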
+ +Here is an example: +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + name: podtransitionrule-sample +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + - stage: PreCheck # stages are supported by PodOpsLifecycle. Defaults to PreCheck. + labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + name: labelCheck + - stage: PostCheck + webhook: + clientConfig: + url: https://1.1.1.1:8089/post-stop + caBundle: Cg== + poll: + url: http://1.1.1.1:8089/fetch-result + rawQueryKey: task-id # URL parameter key to carry trace ID when fetching result. Defaults to task-id in form 'QueryUrl=URL?rawQueryKey=' + intervalSeconds: 5 + timeoutSeconds: 60 + failurePolicy: Fail + parameters: + - key: podIP + valueFrom: + fieldRef: + fieldPath: status.podIP + name: webhookCheck + selector: # select pods in effect + matchLabels: + app: foo +``` + + +### Available Policy +An `availablePolicy` rule defines the availability strategy during the Pod update process. + +#### maxUnavailable +```yaml +availablePolicy: + maxUnavailable: + value: 50% # int or string +``` + +`maxUnavailableValue` is the maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). +Absolute number is calculated from percentage by rounding down. +This can not be 0. + +#### minAvailable +```yaml +availablePolicy: + minAvailable: + value: 5 # int or string +``` +`minAvailableValue` is the minimum number of pods that should be available during the update. + +### Label Check + +A `labelCheck` rule is used to check if labels are satisfied. +You can define your own labels as change check conditions and modify the labels according to your needs. +```yaml +labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + matchExpressions: + - key: app.custom/forbidden + operator: DoesNotExist +``` + +### Webhook +A `webhook` is an HTTP callback, based on which a external web application can determine whether a pod can pass this check. + +* An HTTP POST occurs first when pods entries the configured stage which defaults PreCheck. +* If `poll` is provided, this rule then keeps calling polling url to fetch a long running job result. This job can be located by `task-id` returned from the response of the first request. + + +```yaml +webhook: + clientConfig: # custom server config + url: https://1.1.1.1:8089/post-stop + caBundle: Cg== + poll: + url: http://1.1.1.1:8089/fetch-result + rawQueryKey: task-id + intervalSeconds: 5 + timeoutSeconds: 60 + failurePolicy: Fail + parameters: + - key: podIP + valueFrom: + fieldRef: + fieldPath: status.podIP +``` +**Protocol without poll** + +Request: +```json +// URL: https://1.1.1.1:8089/post-stop +// Method: POST + +{ + "traceId": "", // is generated by Kuperator, which can be used to track request + "stage": "PreTrafficOff", + "ruleName": "webhookCheck", + "resources": [ // Information of Pods which are in this stage + { + "apiVersion": "v1", + "kind": "Pod", + "name": "pod-a", + "parameters": { + "podIP": "1.0.0.1" // Customized information users can indicate from rule paramter + } + }, + { + "apiVersion": "v1", + "kind": "Pod", + "name": "pod-b", + "parameters": { + "podIP": "1.0.0.2" + } + } + ] +} +``` +Response: +```json +{ + "success": false, + "message": "msg", + "finishedNames": ["pod-a", "pod-b"] +} +``` +Response `success` indicating all pods approved or not. 
If it is `false`, the `finishedNames` field can be used to approve a subset of the pods.
+
+**Protocol with poll**
+
+Request:
+```json
+// URL: https://1.1.1.1:8089/post-stop
+// Method: POST
+
+{
+  "traceId": "",  // generated by Kuperator, can be used to track the request
+  "stage": "PreTrafficOff",
+  "ruleName": "webhookCheck",
+  "resources": [   // information of the Pods which are in this stage
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-a",
+      "parameters": {
+        "podIP": "1.0.0.1"  // customized information users can pass in via the rule parameters
+      }
+    },
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-b",
+      "parameters": {
+        "podIP": "1.0.0.2"
+      }
+    }
+  ]
+}
+```
+
+Response:
+
+```json
+{
+  "success": true,
+  "poll": true,   // required, indicates that polling calls are necessary
+  "taskId": "",   // required, used to fetch the polling result
+  "message": "msg"
+}
+```
+The `success` field in the response indicates whether the first request succeeded. If it is `true` and the `poll` field in the response is `true` (or the `async` field in the response is `true`), PodTransitionRule will then keep calling the poll URL to fetch the processing result.
+The `taskId` field is required for polling.
+
+The polling request is an HTTP GET in the form `QueryUrl=URL?task-id=`. The parameter key in this URL defaults to `task-id` if `poll` is used in the above response, and to `trace-id` if `async` is used.
+Users can also specify the key via the `poll.rawQueryKey` field.
+
+The response to a polling call is expected to look like the following:
+
+```json
+{
+  "success": true,
+  "message": "msg",
+  "finished": false,
+  "finishedNames": ["pod-a", "pod-b"]
+}
+```
+
+`success` should be `true` if there is no error. If all pods are approved, `finished` should be `true`.
+If `finished` is `false`, `finishedNames` can be used to approve a subset of the pods.
diff --git a/docs/kuperator/manuals/resourceconsist.md b/docs/kuperator/manuals/resourceconsist.md
new file mode 100644
index 00000000..19aa12f6
--- /dev/null
+++ b/docs/kuperator/manuals/resourceconsist.md
@@ -0,0 +1,437 @@
+---
+sidebar_position: 2
+---
+
+# ResourceConsist
+[**ResourceConsist**](https://github.com/KusionStack/resourceconsist/blob/main/README.md) aims to make it easy to implement a customized controller, and offers controllers the ability to follow
+**PodOpsLifecycle**.
+
+## Tutorials
+**kusionstack.io/resourceconsist** mainly consists of the frame, experimental/adapters and adapters.
+
+The frame, ```kusionstack.io/resourceconsist/pkg/frame```, is used by adapters to start a controller, and it handles
+Reconcile as well as the Employer/Employees' spec & status. If you write an adapter in your own repo, you can import
+```kusionstack.io/resourceconsist/pkg/frame/controller``` and ```kusionstack.io/resourceconsist/pkg/frame/webhook```,
+and call AddToMgr to start a controller.
+
+>The webhookAdapter only needs to be implemented for controllers following PodOpsLifecycle.
+
+```go
+package main
+
+import (
+	controllerframe "kusionstack.io/resourceconsist/pkg/frame/controller"
+	webhookframe "kusionstack.io/resourceconsist/pkg/frame/webhook"
+)
+
+func main() {
+	controllerframe.AddToMgr(manager, yourOwnControllerAdapter)
+	webhookframe.AddToMgr(manager, yourOwnWebhookAdapter)
+}
+```
+### adapters
+The adapters package, ```kusionstack.io/resourceconsist/pkg/adapters```, consists of built-in adapters. You can start a
+controller with built-in adapters by simply calling AddBuiltinControllerAdaptersToMgr and AddBuiltinWebhookAdaptersToMgr,
+passing the built-in adapters' names.
Currently, an alibabacloudslb adapter has been released. You can use it as follows:
+```go
+import (
+	"kusionstack.io/resourceconsist/pkg/adapters"
+)
+
+func main() {
+	adapters.AddBuiltinControllerAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+	adapters.AddBuiltinWebhookAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+}
+```
+Built-in adapters can also be used in the same way as the frame. You can call NewAdapter from a certain built-in adapter package
+and then call frame.AddToMgr to start a controller/webhook.
+
+More built-in adapters will be implemented in the future. To keep this repo stable, all new built-in adapters will
+be added to ```kusionstack.io/resourceconsist/pkg/experimental/adapters``` first, and then moved to
+```kusionstack.io/resourceconsist/pkg/adapters``` once they are ready to be released.
+#### alibabacloudslb adapter
+```pkg/adapters/alibabacloudslb``` is an adapter that implements ReconcileAdapter. It follows **PodOpsLifecycle** to
+handle various scenarios during pod operations, such as creating a new pod, deleting an existing pod, or handling
+changes to pod configurations. This adapter ensures minimal traffic loss and provides a seamless experience for users
+accessing services load balanced by Alibaba Cloud SLB.
+
+In ```pkg/adapters/alibabacloudslb```, the real server is removed from the SLB before a pod operation in ACK. The LB
+management and real server management are handled by CCM in ACK. Since the alibabacloudslb adapter follows PodOpsLifecycle
+and real servers are managed by CCM, ReconcileLifecycleOptions should be implemented. If the cluster is not in ACK or
+CCM is not working in the cluster, the alibabacloudslb controller should implement additional methods of ReconcileAdapter.
+### experimental/adapters
+The experimental/adapters package is a pre-release package for built-in adapters. Usage of experimental/adapters is the same
+as for built-in adapters, but be aware that you should **NOT USE EXPERIMENTAL/ADAPTERS IN PRODUCTION**.
+### demo adapter
+A demo is implemented in ```resource_controller_suite_test.go```. In the demo controller, the employer is represented
+as a service and is expected to have the following **DemoServiceStatus**:
+```
+DemoServiceStatus{
+	EmployerId: employer.GetName(),
+	EmployerStatuses: DemoServiceDetails{
+		RemoteVIP:    "demo-remote-VIP",
+		RemoteVIPQPS: 100,
+	}
+}
+```
+The employee is represented as a pod and is expected to have the following **DemoPodStatus**:
+```
+DemoPodStatus{
+	EmployeeId:   pod.Name,
+	EmployeeName: pod.Name,
+	EmployeeStatuses: PodEmployeeStatuses{
+		Ip:             string,
+		Ipv6:           string,
+		LifecycleReady: bool,
+		ExtraStatus: PodExtraStatus{
+			TrafficOn:     bool,
+			TrafficWeight: int,
+		},
+	}
+}
+```
+The DemoResourceProviderClient is a fake client that handles backend provider resources related to the employer/employee
+(service/pods). In the Demo Controller, ```demoResourceVipStatusInProvider``` and ```demoResourceRsStatusInProvider```
+are mocked as resources in the backend provider.
+
+How the demo controller adapter is implemented is introduced in detail below.
+First, ```DemoControllerAdapter``` is defined, including a Kubernetes client and a resourceProviderClient. What is included in
+the adapter struct can be defined as needed.
+```go
+type DemoControllerAdapter struct {
+	client.Client
+	resourceProviderClient *DemoResourceProviderClient
+}
+```
+The following declares that DemoControllerAdapter implements ```ReconcileAdapter``` and ```ReconcileLifecycleOptions```.
+Implementing ```ReconcileAdapter``` is required, while implementing ```ReconcileLifecycleOptions``` isn't; check the remarks
+for ```ReconcileLifecycleOptions``` in ```kusionstack.io/resourceconsist/pkg/frame/controller/types.go``` to find out why.
+```go
+var _ ReconcileAdapter = &DemoControllerAdapter{}
+var _ ReconcileLifecycleOptions = &DemoControllerAdapter{}
+```
+The following two methods, which implement ```ReconcileLifecycleOptions``` for DemoControllerAdapter, define whether
+DemoControllerAdapter follows PodOpsLifecycle and whether it needs to record employees.
+```go
+func (r *DemoControllerAdapter) FollowPodOpsLifeCycle() bool {
+	return true
+}
+
+func (r *DemoControllerAdapter) NeedRecordEmployees() bool {
+	return needRecordEmployees
+}
+```
+```IEmployer``` and ```IEmployee``` are interfaces that include several methods describing the status of the employer and
+the employee.
+```go
+type IEmployer interface {
+	GetEmployerId() string
+	GetEmployerStatuses() interface{}
+	EmployerEqual(employer IEmployer) (bool, error)
+}
+
+type IEmployee interface {
+	GetEmployeeId() string
+	GetEmployeeName() string
+	GetEmployeeStatuses() interface{}
+	EmployeeEqual(employee IEmployee) (bool, error)
+}
+
+type DemoServiceStatus struct {
+	EmployerId       string
+	EmployerStatuses DemoServiceDetails
+}
+
+type DemoServiceDetails struct {
+	RemoteVIP    string
+	RemoteVIPQPS int
+}
+
+type DemoPodStatus struct {
+	EmployeeId       string
+	EmployeeName     string
+	EmployeeStatuses PodEmployeeStatuses
+}
+```
+```GetSelectedEmployeeNames``` returns the names of all employees selected by the employer; here, these are the names of the
+pods selected by the service. ```GetSelectedEmployeeNames``` is used for ensuring LifecycleFinalizer and ExpectedFinalizer, so you can give
+it an empty return if your adapter doesn't follow PodOpsLifecycle.
+```go
+func (r *DemoControllerAdapter) GetSelectedEmployeeNames(ctx context.Context, employer client.Object) ([]string, error) {
+	svc, ok := employer.(*corev1.Service)
+	if !ok {
+		return nil, fmt.Errorf("expect employer kind is Service")
+	}
+	selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated()
+	var podList corev1.PodList
+	err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector})
+	if err != nil {
+		return nil, err
+	}
+
+	selected := make([]string, len(podList.Items))
+	for idx, pod := range podList.Items {
+		selected[idx] = pod.Name
+	}
+
+	return selected, nil
+}
+```
+```GetExpectedEmployer``` and ```GetCurrentEmployer``` define what is expected under the spec of the employer and what the
+current status is, such as the load balancer from a cloud provider. Here in the demo adapter, the expected state is hardcoded
+and the current state is retrieved from a fake resource provider, ```demoResourceVipStatusInProvider```.
+```go +func (r *DemoControllerAdapter) GetExpectedEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return nil, nil + } + var expect []IEmployer + expect = append(expect, DemoServiceStatus{ + EmployerId: employer.GetName(), + EmployerStatuses: DemoServiceDetails{ + RemoteVIP: "demo-remote-VIP", + RemoteVIPQPS: 100, + }, + }) + return expect, nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + var current []IEmployer + + req := &DemoResourceVipOps{} + resp, err := r.resourceProviderClient.QueryVip(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource vip query resp is nil") + } + + for _, employerStatus := range resp.VipStatuses { + current = append(current, employerStatus) + } + return current, nil +} +``` +```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles creation/update/deletion of resources related to employer on +related backend provider. Here in the demo adapter, ```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles +```demoResourceVipStatusInProvider```. +```go +func (r *DemoControllerAdapter) CreateEmployer(ctx context.Context, employer client.Object, toCreates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + + toCreateDemoServiceStatus := make([]DemoServiceStatus, len(toCreates)) + for idx, create := range toCreates { + createDemoServiceStatus, ok := create.(DemoServiceStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreates employer is not DemoServiceStatus") + } + toCreateDemoServiceStatus[idx] = createDemoServiceStatus + } + + _, err := r.resourceProviderClient.CreateVip(&DemoResourceVipOps{ + VipStatuses: toCreateDemoServiceStatus, + }) + if err != nil { + return nil, toCreates, err + } + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployer(ctx context.Context, employer client.Object, toUpdates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoServiceStatus := make([]DemoServiceStatus, len(toUpdates)) + for idx, update := range toUpdates { + updateDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdates employer is not DemoServiceStatus") + } + toUpdateDemoServiceStatus[idx] = updateDemoServiceStatus + } + + _, err := r.resourceProviderClient.UpdateVip(&DemoResourceVipOps{ + VipStatuses: toUpdateDemoServiceStatus, + }) + if err != nil { + return nil, toUpdates, err + } + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployer(ctx context.Context, employer client.Object, toDeletes []IEmployer) ([]IEmployer, []IEmployer, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoServiceStatus := make([]DemoServiceStatus, len(toDeletes)) + for idx, update := range toDeletes { + deleteDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDeletes employer is not DemoServiceStatus") + } + toDeleteDemoServiceStatus[idx] = deleteDemoServiceStatus + } + + _, err := r.resourceProviderClient.DeleteVip(&DemoResourceVipOps{ + VipStatuses: toDeleteDemoServiceStatus, + }) + if err != nil { + return nil, toDeletes, err + } + return toDeletes, nil, nil +} +``` 
+```GetExpectedEmployee```and```GetCurrentEmployee``` defines what is expected under the spec of employer and employees +and what is current status, like real servers under the load balancer from a cloud provider. Here in the demo adapter, +expected is calculated from pods and current is retrieved from a fake resource provider ```demoResourceRsStatusInProvider```. +```go +// GetExpectEmployeeStatus return expect employee status +func (r *DemoControllerAdapter) GetExpectedEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return []IEmployee{}, nil + } + + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + expected := make([]IEmployee, len(podList.Items)) + expectIdx := 0 + for _, pod := range podList.Items { + if !pod.DeletionTimestamp.IsZero() { + continue + } + status := DemoPodStatus{ + EmployeeId: pod.Name, + EmployeeName: pod.Name, + } + employeeStatuses, err := GetCommonPodEmployeeStatus(&pod) + if err != nil { + return nil, err + } + extraStatus := PodExtraStatus{} + if employeeStatuses.LifecycleReady { + extraStatus.TrafficOn = true + extraStatus.TrafficWeight = 100 + } else { + extraStatus.TrafficOn = false + extraStatus.TrafficWeight = 0 + } + employeeStatuses.ExtraStatus = extraStatus + status.EmployeeStatuses = employeeStatuses + expected[expectIdx] = status + expectIdx++ + } + + return expected[:expectIdx], nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + var current []IEmployee + req := &DemoResourceRsOps{} + resp, err := r.resourceProviderClient.QueryRealServer(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource rs query resp is nil") + } + + for _, rsStatus := range resp.RsStatuses { + current = append(current, rsStatus) + } + return current, nil +} +``` +```CreateEmployees/UpdateEmployees/DeleteEmployees``` handles creation/update/deletion of resources related to employee +on related backend provider. Here in the demo adapter, ```CreateEmployees/UpdateEmployees/DeleteEmployees``` +handles ```demoResourceRsStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) CreateEmployees(ctx context.Context, employer client.Object, toCreates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + toCreateDemoPodStatuses := make([]DemoPodStatus, len(toCreates)) + + for idx, toCreate := range toCreates { + podStatus, ok := toCreate.(DemoPodStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreate is not DemoPodStatus") + } + toCreateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.CreateRealServer(&DemoResourceRsOps{ + RsStatuses: toCreateDemoPodStatuses, + }) + if err != nil { + return nil, toCreates, err + } + + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployees(ctx context.Context, employer client.Object, toUpdates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoPodStatuses := make([]DemoPodStatus, len(toUpdates)) + + for idx, toUpdate := range toUpdates { + podStatus, ok := toUpdate.(DemoPodStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdate is not DemoPodStatus") + } + toUpdateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.UpdateRealServer(&DemoResourceRsOps{ + RsStatuses: toUpdateDemoPodStatuses, + }) + if err != nil { + return nil, toUpdates, err + } + + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployees(ctx context.Context, employer client.Object, toDeletes []IEmployee) ([]IEmployee, []IEmployee, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoPodStatuses := make([]DemoPodStatus, len(toDeletes)) + + for idx, toDelete := range toDeletes { + podStatus, ok := toDelete.(DemoPodStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDelete is not DemoPodStatus") + } + toDeleteDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.DeleteRealServer(&DemoResourceRsOps{ + RsStatuses: toDeleteDemoPodStatuses, + }) + if err != nil { + return nil, toDeletes, err + } + + return toDeletes, nil, nil +} +``` diff --git a/docs/kuperator/started/_category_.json b/docs/kuperator/started/_category_.json new file mode 100644 index 00000000..877a378f --- /dev/null +++ b/docs/kuperator/started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 2 +} diff --git a/docs/kuperator/started/demo-graceful-operation.md b/docs/kuperator/started/demo-graceful-operation.md new file mode 100644 index 00000000..6eb1fce9 --- /dev/null +++ b/docs/kuperator/started/demo-graceful-operation.md @@ -0,0 +1,340 @@ +# Using KusionStack Kuperator to operate Pods gracefully + +Applications always provide its service along with traffic routing. +On Kubernetes, they should be a set of Pods and a corresponding Kubernetes Service resource to expose the service. + +However, during operations such as updating Pod revisions, +there is a risk that client request traffic may be lost. This can lead to a poor user experience for developers. + +This tutorial will demonstrate how to operate Pods gracefully in a KusionStack Kuperator way on Aliyun ACK +with SLB as a Service backend provider. + +> You can also get the same point from [this video](https://www.bilibili.com/video/BV1n8411q7sP/?t=15.7), +> which shows the same case using both KusionStack Kusion and Kuperator. 
+> The sample used in this video can be found from [KusionStack Catalog](https://github.com/KusionStack/catalog/tree/main/models/samples/wordpress). + +## Preparing + +First, ensure that you have an Aliyun ACK Kubernetes cluster set up in order to provision an Aliyun SLB. + +Next, install KusionStack Kuperator on this Kubernetes cluster +following [installation doc](https://kusionstack.io/docs/kuperator/started/install). + +## Get started + +### Create a new namespace + +To begin, create a new namespace for this tutorial: + +```shell +$ kubectl create ns kuperator-tutorial +``` + +### Provision Pods and Services + +You can create a set of Pods to run up a demo application service +by creating CollaSet resource using following command: + +``` shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.3 + name: server + command: + - /server + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +There should be 3 Pods created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +server-c5lsr 1/1 Running 0 2m23s +server-p6wrx 1/1 Running 0 2m23s +server-zn62c 1/1 Running 0 2m23s +``` + +Then create a Kubernetes Service by running following command, +which will provision Aliyun SLB to expose service. + +```shell +echo ' +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small + service.beta.kubernetes.io/backend-type: eni + labels: + kusionstack.io/control: "true" # this label is required + name: server +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: server + type: LoadBalancer +' | kubectl -n kuperator-tutorial apply -f - +``` + +A service with external IP should be provisioned. + +```shell +$ kubectl -n kuperator-tutorial get svc server +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +server LoadBalancer 192.168.225.55 47.101.49.182 80:30146/TCP 51s +``` + +The label `kusionstack.io/control: "true"` on Service is very important. +It means this service resource will be recognized by ResourceConsist framework, and then participate in PodOpsLifecycle +to control the Aliyun SLB to switch off traffic before updating each Pod and switch on traffic after it finished, +in order to protect the service. + +### Provision a client + +Then we will provision a client to access the service we created before. +Please replace `` in the following CollaSet yaml with the external IP from Kubernetes Service created above, and apply again. 
+ +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: client +spec: + replicas: 1 + selector: + matchLabels: + app: client + template: + metadata: + labels: + app: client + spec: + containers: + - image: wu8685/echo:1.3 + name: nginx + command: + - /client + args: + - -url + - http:///echo # EXTERNAL_IP should be replaced + - -m + - POST + - d + - kuperator-tutorial + - -qps + - "10" + - -worker + - "10" + - -timeout + - "10000" + resources: + limits: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi +' | kubectl -n kuperator-tutorial apply -f - +``` + +A client Pod should be created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-nc426 1/1 Running 0 30s +server-c5lsr 1/1 Running 0 19m +server-p6wrx 1/1 Running 0 19m +server-zn62c 1/1 Running 0 19m +``` + +This client will continuously access the service using the configuration provided in the command. +You can monitor the response codes from its logs: + +```shell +kubectl -n kuperator-tutorial logs -f client-nc426 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +``` + +The accesses are all successful. + +### Update Pod revision + +To trigger a Pod revision update, run the following command +to edit the container image and command in the PodTemplate of CollaSet: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.2 + name: server + command: + - /app/echo + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +It will trigger all Pods updated simultaneously. So the application `server` has no Pod to serve. +We can observe the error from client logs. 
+ +```shell +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:54040->47.101.49.182:80: read: connection reset by peer +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:34438->47.101.49.182:80: read: connection reset by peer +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-1 another loop, request: 20, failed: 3 +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-0 another loop, request: 20, failed: 3 +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +``` + +### Provision PodTransistionRule + +To avoid this problem, provision a PodTransitionRule with a maxUnavailable 50% rule by running the following command: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + labels: + name: server +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + selector: + matchLabels: + app: server +' | kubectl -n kuperator-tutorial apply -f - +``` + +After updating the CollaSet of the server to trigger an update, you will see the Pods rolling update one by one, +ensuring that at least one Pod is always available to serve. + +```shell +kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-rrfbj 1/1 Running 0 25s +server-457sn 0/1 Running 0 5s +server-bd5sz 0/1 Running 0 5s +server-l842s 1/1 Running 0 2m4s +``` + +You can see from the client logs that no access requests fail during this update. + +```shell +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +``` + +### Clean tutorial namespace + +At the end of this tutorial, you can clean up the resources by deleting the namespace: + +```shell +$ kubectl delete ns kuperator-tutorial +``` + +## Comparison with the Native Approach + +Kubernetes provides `preStop` and `postStart` hook in each container, by which users can also interact with service outside +Kubernetes like Aliyun SLB service. However, KusionStack Kuperator offers several advantages: + +* Pod level vs Container level + +Kuperator offers a Pod level hooks which have more complete information than one container, +especially there are several containers in one Pod. + +* Plugin-able + +Through KusionStack Kuperator, you can decouple operations executed before or after Pods actually change. 
+For example, traffic control can be added or removed without modifying the Pod's preStop configuration. + +* Rollback option + +In case of issues, rollback becomes a viable option when using the Kuperator approach to update Pods. +Since Kuperator does not modify the Pods or their containers during the update, +if the traffic service experiences problems, there is an opportunity to cancel the update. \ No newline at end of file diff --git a/docs/kuperator/started/install.md b/docs/kuperator/started/install.md new file mode 100644 index 00000000..1ac6a89c --- /dev/null +++ b/docs/kuperator/started/install.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 2 +--- + +# Installation + +## Install with helm +KusionStack Kuperator requires **Kubernetes version >= 1.18** +```shell +# Firstly add charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install kuperator kusionstack/kuperator +``` + + +[Helm](https://github.com/helm/helm) is a tool for managing packages of pre-configured Kubernetes resources. +### Optional: chart parameters + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|-------------|----------------|----------------| +| `namespace` | namespace for Kuperator installation | `kusionstack-system` | +| `namespaceEnabled` | Whether to create the installation.namespace | `true` | +| `managerReplicas`| Replicas of Kuperator deployment | `3` | +| `image.repo` | Repository for kuperator image | `kusionstack/kuperator`| +| `image.pullPolicy`| Image pull policy for kuperator-manager container | `IfNotPresent` | +| `image.tag` | Tag for kuperator-manager image | `v0.1.0` | +| `resources.limits.cpu` | CPU resource limit of kuperator-manager container | `500m` | +| `resources.limits.memory` | Memory resource limit of kuperator-manager container | `128Mi` | +| `resources.requests.cpu` | CPU resource request of kuperator-manager container | `10m` | +| `resources.requests.memory` | Memory resource request of kuperator-manager container | `64Mi` | + +### Upgrade + +Run following command to upgrade KusionStack Kuperator to the latest version. + +```shell +# Upgrade to the latest version +$ helm upgrade kuperator kusionstack/kuperator +``` + +### Uninstall + +Run following command to uninstall KusionStack Kuperator. + +```shell +# Uninstall +$ helm uninstall kuperator +``` \ No newline at end of file diff --git a/docs/kusion/1-what-is-kusion/1-overview.md b/docs/kusion/1-what-is-kusion/1-overview.md new file mode 100644 index 00000000..bbbc5fbb --- /dev/null +++ b/docs/kusion/1-what-is-kusion/1-overview.md @@ -0,0 +1,62 @@ +--- +id: overview +title: Overview +slug: / +--- + +# Overview + +Welcome to Kusion! This introduction section covers what Kusion is, the Kusion workflow, and how Kusion compares to other software. If you just want to dive into using Kusion, feel free to skip ahead to the [Getting Started](getting-started/install-kusion) section. + +## What is Kusion? + +Kusion is an intent-driven [Platform Orchestrator](https://internaldeveloperplatform.org/platform-orchestrators/), which sits at the core of an [Internal Developer Platform (IDP)](https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/). 
With Kusion you can enable app-centric development: your developers only need to write a single application specification, the [AppConfiguration](https://www.kusionstack.io/docs/concepts/app-configuration). The [AppConfiguration](https://www.kusionstack.io/docs/concepts/app-configuration) defines the workload and all resource dependencies without needing to supply environment-specific values; Kusion ensures it provides everything needed for the application to run.
+
+Kusion helps app developers who are responsible for creating applications and the platform engineers responsible for maintaining the infrastructure the applications run on. These roles may overlap or align differently in your organization, but Kusion is intended to ease the workload for any practitioner responsible for either set of tasks.
+
+![arch](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/overview.jpg)
+
+
+## How does Kusion work?
+
+As a Platform Orchestrator, Kusion enables you to address challenges often associated with Day 0 and Day 1. Both platform engineers and application engineers can benefit from Kusion.
+
+There are two key workflows for Kusion:
+
+1. **Day 0 - Set up the modules and workspaces:** Platform engineers create shared modules for deploying applications and their underlying infrastructure, and workspace definitions for each target landing zone. These standardized, shared modules codify the requirements from stakeholders across the organization including security, compliance, and finance.
+
+   Kusion modules abstract the complexity of underlying infrastructure tooling, enabling app developers to deploy their applications using a self-service model (a rough sketch of such a workspace definition is shown after the figures below).
+
+ + ![workflow](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/platform_workflow.jpg) +
+ +2. **Day 1 - Set up the application:** Application developers leverage the workspaces and modules created by the platform engineers to deploy applications and their supporting infrastructure. The platform team maintains the workspaces and modules, which allows application developers to focus on building applications using a repeatable process on standardized infrastructure. + +
+ + ![workflow](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/app_workflow.jpg) +
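+
+To make the two workflows above concrete, the following is a rough, hypothetical sketch of the kind of workspace configuration a platform engineer might maintain in Day 0. The module name and fields are illustrative assumptions rather than a definitive format; the exact keys depend on the Kusion modules your platform team actually publishes.
+
+```yaml
+# Hypothetical workspace configuration owned by platform engineers (Day 0).
+# Application developers (Day 1) reference the module from their AppConfiguration
+# without supplying any of these environment-specific values.
+modules:
+  mysql:
+    default:
+      cloud: aws                 # illustrative: target cloud for the database instance
+      size: 20                   # illustrative: storage size in GiB
+      instanceType: db.t3.micro  # illustrative: instance class chosen by the platform team
+```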
+
+## Kusion Highlights
+
+* **Platform as Code**
+
+  Specify desired application intent through declarative configuration code, and drive continuous deployment with any CI/CD system or GitOps to match that intent. No ad-hoc scripts, no hard-to-maintain custom workflows, just declarative configuration code.
+
+* **Dynamic Configuration Management**
+
+  Enable platform teams to set baseline templates and control how and where application workloads are deployed and accessory resources are provisioned, while still giving application developers freedom via workload-centric specification and deployment.
+
+* **Security & Compliance Built In**
+
+  Enforce security and infrastructure best practices with out-of-the-box [base models](https://github.com/KusionStack/catalog), and create security and compliance guardrails for any Kusion deployment with third-party Policy as Code tools. All accessory resource secrets are automatically injected into Workloads.
+
+* **Lightweight and Open Model Ecosystem**
+
+  A pure client-side solution ensures good portability, and the rich APIs make it easy to integrate and automate. A large and growing model ecosystem covers all stages of the application lifecycle, with extensive connections to various infrastructure capabilities.
+
+:::tip
+
+**Kusion is an early project.** The end goal of Kusion is to boost the [Internal Developer Platform](https://internaldeveloperplatform.org/) revolution, and we are iterating on Kusion quickly to strive towards this goal. For any help or feedback, please contact us in [Slack](https://github.com/KusionStack/community/discussions/categories/meeting) or [issues](https://github.com/KusionStack/kusion/issues).
diff --git a/docs/kusion/1-what-is-kusion/2-kusion-vs-x.md b/docs/kusion/1-what-is-kusion/2-kusion-vs-x.md
new file mode 100644
index 00000000..a5ed333d
--- /dev/null
+++ b/docs/kusion/1-what-is-kusion/2-kusion-vs-x.md
@@ -0,0 +1,37 @@
+---
+id: kusion-vs-x
+---
+
+# Kusion vs Other Software
+
+It can be difficult to understand how different tools compare to each other. Is one a replacement for the other? Are they complementary? In this section, we compare Kusion to other software.
+
+**vs. GitOps (ArgoCD, FluxCD, etc.)**
+
+According to the [open GitOps principles](https://opengitops.dev/), a GitOps system typically has its desired state expressed declaratively, continuously observes the actual system state and attempts to apply the desired state. In the design of the Kusion toolchain, we refer to those principles but have no intention of reinventing the GitOps wheel.
+
+Kusion adopts your GitOps process and enriches it with additional features. The declarative [AppConfiguration](../concepts/app-configuration) model can be used to express the desired intent; once the intent is declared, the [Kusion CLI](../reference/commands) takes on the role of making production match that intent as safely as possible.
+
+**vs. PaaS (Heroku, Vercel, etc.)**
+
+Kusion shares the same goal as traditional PaaS platforms: to provide application delivery and management capabilities. The intuitive difference from full-functionality PaaS platforms is that Kusion is a client-side toolchain, not a complete PaaS platform.
+
+Also, traditional PaaS platforms typically constrain the types of applications they can run, while there is no such constraint for Kusion, which provides greater flexibility.
+
+Kusion allows you to have platform-like features without the constraints of a traditional PaaS.
However, Kusion is not attempting to replace any PaaS platforms, instead Kusion can be used to deploy to a platform such as Heroku. + +**vs. KubeVela** + +KubeVela is a modern software delivery and management control plane which makes it easier to deploy and operate applications on top of Kubernetes. + +Although some might initially perceive an overlap between Kusion and KubeVela, they are in fact complementary and can be integrated to work together. As a lightweight, purely client-side tool, coupled with corresponding [Generator](https://github.com/KusionStack/kusion-module-framework) implementation, Kusion can render [AppConfiguration](../concepts/app-configuration) schema to generate CRD resources for KubeVela and leverage KubeVela's control plane to implement application delivery. + +**vs. Helm** + +The concept of Helm originates from the [package management](https://en.wikipedia.org/wiki/Package_manager) mechanism of the operating system. It is a package management tool based on templated YAML files and supports the execution and management of resources in the package. + +Kusion is not a package manager. Kusion naturally provides a superset of Helm capabilities with the modeled key-value pairs, so that developers can use Kusion directly as a programable alternative to avoid the pain of writing text templates. For users who have adopted Helm, the stack compilation results in Kusion can be packaged and used in Helm format. + +**vs. Kubernetes** + +Kubernetes(K8s) is a container scheduling and management runtime widely used around the world, an "operating system" core for containers, and a platform for building platforms. Above the Kubernetes API layer, Kusion aims to provide app-centric **abstraction** and unified **workspace**, better **user experience** and automation **workflow**, and helps developers build the app delivery model easily and collaboratively. diff --git a/docs/kusion/1-what-is-kusion/_category_.json b/docs/kusion/1-what-is-kusion/_category_.json new file mode 100644 index 00000000..0817eb90 --- /dev/null +++ b/docs/kusion/1-what-is-kusion/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "What is Kusion?" +} diff --git a/docs/kusion/2-getting-started/1-install-kusion.md b/docs/kusion/2-getting-started/1-install-kusion.md new file mode 100644 index 00000000..540881d6 --- /dev/null +++ b/docs/kusion/2-getting-started/1-install-kusion.md @@ -0,0 +1,144 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Install Kusion + +You can install the latest Kusion CLI on MacOS, Linux and Windows. + +## MacOs/Linux + +For the MacOs and Linux, Homebrew and sh script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on MacOS and Linux is to use the brew package manager. 
+ +**Install Kusion** + +```bash +# tap formula repository Kusionstack/tap +brew tap KusionStack/tap + +# install Kusion +brew install KusionStack/tap/kusion +``` + +**Update Kusion** + +```bash +# update formulae from remote +brew update + +# update Kusion +brew upgrade KusionStack/tap/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/tap/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +curl https://www.kusionstack.io/scripts/install.sh | sh +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, 0.10.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.11.0 +curl https://www.kusionstack.io/scripts/install.sh | sh -s 0.11.0 +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +curl https://www.kusionstack.io/scripts/uninstall.sh | sh +``` + +```mdx-code-block + + +``` + +## Windows + +For the Windows, Scoop and Powershell script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on Windows is to use the scoop package manager. + +**Install Kusion** + +```bash +# add scoop bucket KusionStack +scoop bucket add KusionStack https://github.com/KusionStack/scoop-bucket.git + +# install kusion +scoop install KusionStack/kusion +``` + +**Update Kusion** + +```bash +# update manifest from remote +scoop update + +# update Kusion +scoop install KusionStack/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/install.ps1 | iex" +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.10.0 +powershell {"& { $(irm https://www.kusionstack.io/scripts/install.ps1) } -Version 0.11.0" | iex} +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/uninstall.ps1 | iex" +``` + +```mdx-code-block + + +``` diff --git a/docs/kusion/2-getting-started/2-deliver-quickstart.md b/docs/kusion/2-getting-started/2-deliver-quickstart.md new file mode 100644 index 00000000..7b89b4fa --- /dev/null +++ b/docs/kusion/2-getting-started/2-deliver-quickstart.md @@ -0,0 +1,221 @@ +--- +id: deliver-quickstart +--- + +# Run Your First App on Kubernetes with Kusion + +In this tutorial, we will walk through how to deploy a quickstart application on Kubernetes with Kusion. The demo application can interact with a locally deployed MySQL database, which is declared as an accessory in the config codes and will be automatically created and managed by Kusion. 
+ +## Prerequisites + +Before we start to play with this example, we need to have the Kusion CLI installed and run an accessible Kubernetes cluster. Here are some helpful documents: + +- Install [Kusion CLI](./1-install-kusion.md). +- Run a [Kubernetes](https://kubernetes.io) cluster. Some light and convenient options for Kubernetes local deployment include [k3s](https://docs.k3s.io/quick-start), [k3d](https://k3d.io/v5.4.4/#installation), and [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node). + +## Initialize Project + +We can start by initializing this tutorial project with `kusion init` cmd. + +```shell +# Create a new directory and navigate into it. +mkdir quickstart && cd quickstart + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── default +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the project and stack structure can be found in [Project](../3-concepts/1-project/1-overview.md) and [Stack](../3-concepts/2-stack/1-overview.md). +::: + +### Review Configuration Files + +Now let's have a glance at the configuration codes of `default` stack: + +```shell +cat default/main.k +``` + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares the customized configuration codes for default stack. +quickstart: ac.AppConfiguration { + workload: service.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + } +} +``` + +The configuration file `main.k`, usually written by the **App Developers**, declares the customized configuration codes for `default` stack, including an `AppConfiguration` instance with the name of `quickstart`. The `quickstart` application consists of a `Workload` with the type of `service.Service`, which runs a container named `quickstart` using the image of `kusionstack/kusion-quickstart:latest`. + +Besides, it declares a **Kusion Module** with the type of `network.Network`, exposing `8080` port to be accessed for the long-running service. + +The `AppConfiguration` model can hide the major complexity of Kubernetes resources such as `Namespace`, `Deployment`, and `Service` which will be created and managed by Kusion, providing the concepts that are **application-centric** and **infrastructure-agnostic** for a more developer-friendly experience. + +:::info +More details about the `AppConfiguration` model and built-in Kusion Module can be found in [kam](https://github.com/KusionStack/kam) and [catalog](https://github.com/KusionStack/catalog). +::: + +The declaration of the dependency packages can be found in `default/kcl.mod`: + +```shell +cat default/kcl.mod +``` + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +``` + +:::info +More details about the application model and module dependency declaration can be found in [Kusion Module guide for app dev](../3-concepts/3-module/3-app-dev-guide.md). +::: + +:::tip +The specific module versions we used in the above demonstration is only applicable for Kusion CLI after **v0.12.0**. 
+::: + +## Application Delivery + +Use the following command to deliver the quickstart application in `default` stack on your accessible Kubernetes cluster, while watching the resource creation and automatically port-forwarding the specified port (8080) from local to the Kubernetes Service of the application. We can check the details of the resource preview results before we confirm to apply the diffs. + +```shell +cd default && kusion apply --port-forward 8080 +``` + +![](/img/docs/user_docs/getting-started/kusion_apply_quickstart_0.12.gif) + +:::info +During the first apply, the models and modules that the application depends on will be downloaded, so it may take some time (usually within one minute). You can take a break and have a cup of coffee. +::: + +:::info +Kusion by default will create the Kubernetes resources of the application in the namespace the same as the project name. If you want to customize the namespace, please refer to [Project Namespace Extension](../3-concepts/1-project/2-configuration.md#kubernetesnamespace) and [Stack Namespace Extension](../3-concepts/2-stack/2-configuration.md#kubernetesnamespace). +::: + +Now we can visit [http://localhost:8080](http://localhost:8080) in our browser and play with the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_page.png) + +## Add MySQL Accessory + +As you can see, the demo application page indicates that the MySQL database is not ready yet. Hence, we will now add a MySQL database as an accessory for the workload. + +We can add the Kusion-provided built-in dependency in the `default/kcl.mod`, so that we can use the `MySQL` module in the configuration codes. + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +``` + +We can update the `default/main.k` with the following configuration codes: + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +import mysql + +# main.k declares the customized configuration codes for default stack. +quickstart: ac.AppConfiguration { + workload: service.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + env: { + "DB_HOST": "$(KUSION_DB_HOST_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_USERNAME": "$(KUSION_DB_USERNAME_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_PASSWORD": "$(KUSION_DB_PASSWORD_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + } + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +The configuration codes above declare a local `mysql.MySQL` with the engine version of `8.0` as an accessory for the application workload. The necessary Kubernetes resources for deploying and using the local MySQL database will be generated and users can get the `host`, `username` and `password` of the database through the [MySQL Credentials And Connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) of Kusion in application containers. + +:::info +For more information about the naming convention of Kusion built-in MySQL module, you can refer to [Module Naming Convention](../6-reference/2-modules/3-naming-conventions.md). 
+::: + +After that, we can re-apply the application, and we can set the `--watch=false` to skip watching the resources to be reconciled: + +```shell +kusion apply --port-forward 8080 --watch=false +``` + +![](/img/docs/user_docs/getting-started/kusion_re_apply_quickstart_0.12.gif) + +:::info +You may wait another minute to download the MySQL Module. +::: + +Let's visit [http://localhost:8080](http://localhost:8080) in our browser, and we can find that the application has successfully connected to the MySQL database. The connection information is also printed on the page. + +![](/img/docs/user_docs/getting-started/quickstart_page_with_mysql.png) + +Now please feel free to enjoy the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_mysql_validation.gif) + +## Delete Application + +We can delete the quickstart demo workload and related accessory resources with the following cmd: + +```shell +kusion destroy --yes +``` + +![](/img/docs/user_docs/getting-started/kusion_destroy_quickstart.gif) diff --git a/docs/kusion/2-getting-started/_category_.json b/docs/kusion/2-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/docs/kusion/2-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/docs/kusion/3-concepts/0-overview.md b/docs/kusion/3-concepts/0-overview.md new file mode 100644 index 00000000..44aa634e --- /dev/null +++ b/docs/kusion/3-concepts/0-overview.md @@ -0,0 +1,21 @@ +--- +id: overview +--- + +# Overview + +In this article, we will provide an overview of the core concepts of Kusion from the perspective of the Kusion workflow. + +![kusion workflow](/img/docs/concept/kusion_workflow.png) + +The workflow of Kusion is illustrated in the diagram above, which consists of three steps. + +The first step is **Write**, where the platform engineers build the [Kusion Modules](./3-module/1-overview.md) and initialize a [Workspace](./4-workspace.md). And the application developers declare their operational intent in [AppConfiguration](./5-appconfiguration.md) under a specific [Project](./1-project/1-overview.md) and [Stack](./2-stack/1-overview.md) path. + +The second step is the **Build** process, which results in the creation of the **SSoT** (Single Source of Truth), also known as the [Spec](./6-spec.md) of the current operational task. If you need version management of the SSoT, we recommend you manage the `Spec` with a VCS (Version Control System) tool like **Git**. + +The third step is **Apply**, which makes the `Spec` effective. Kusion parses the operational intent based on the `Spec` produced in the previous step. Before applying the `Spec`, Kusion will execute the `Preview` command (you can also execute this command manually) which will use a three-way diff algorithm to preview changes and prompt users to make sure all changes meet their expectations. And the `Apply` command will then actualize the operation intent onto various infrastructure platforms, currently supporting **Kubernetes**, **Terraform**, and **On-Prem** infrastructures. A [Release](./9-release.md) file will be created in the [Storage Backend](./7-backend.md) to record an operation. The `Destroy` command will delete the resources recorded in the `Release` file of a project in a specific workspace. + +A more detailed demonstration of the Kusion engine can be seen below. 
+ +![kusion engine](/img/docs/concept/kusion_engine.png) \ No newline at end of file diff --git a/docs/kusion/3-concepts/1-project/1-overview.md b/docs/kusion/3-concepts/1-project/1-overview.md new file mode 100644 index 00000000..edcc84d7 --- /dev/null +++ b/docs/kusion/3-concepts/1-project/1-overview.md @@ -0,0 +1,12 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview + +A project in Kusion is defined as a folder that contains a `project.yaml` file and is generally recommended to be linked to a Git repository. Typically, the mapping between a project and a repository is 1:1, however, it is possible to have multiple projects connected to a single repository — for example, in the case of a monorepo. A project consists of one or more applications. + +The purpose of the project is to bundle application configurations there are relevant. Specifically, it organizes logical configurations for internal components to orchestrate the application and assembles these configurations to suit different roles, such as developers and SREs, thereby covering the entire lifecycle of application development. + +From the perspective of the application development lifecycle, the configurations delineated by the project is decoupled from the application code. It takes an immutable image as input, allowing users to perform operations and maintain the application within an independent configuration codebase. \ No newline at end of file diff --git a/docs/kusion/3-concepts/1-project/2-configuration.md b/docs/kusion/3-concepts/1-project/2-configuration.md new file mode 100644 index 00000000..b5823df8 --- /dev/null +++ b/docs/kusion/3-concepts/1-project/2-configuration.md @@ -0,0 +1,38 @@ +--- +id: configuration +sidebar_label: Project file reference +--- + +# Kusion project file reference + +Every Kusion project has a project file, `project.yaml`, which specifies metadata about your project, such as the project name and project description. The project file must begin with lowercase `project` and have an extension of either `.yaml` or `.yml`. + +## Attributes + +| Name | Required | Description | Options | +|:------------- |:--------------- |:------------- |:------------- | +| `name` | required | Name of the project containing alphanumeric characters, hyphens, underscores. | None | +| `description` | optional | A brief description of the project. | None | +| `extensions` | optional | List of extensions on the project. | [See blow](#extensions) | + +### Extensions + +Extensions allow you to customize how resources are generated or customized as part of release. + +#### kubernetesNamespace + +The Kubernetes namespace extension allows you to customize namespace within your application generate Kubernetes resources. + +| Key | Required | Description | Example | +|:------|:--------:|:-------------|:---------| +| kind | y | The kind of extension being used. Must be 'kubernetesNamespace' | `kubernetesNamespace` | +| namespace | y | The namespace where all application-scoped resources generate Kubernetes objects. | `default` | + +```yaml +# Example `project.yaml` file with customized namespace of `test`. 
+name: example +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` diff --git a/docs/kusion/3-concepts/1-project/_category_.json b/docs/kusion/3-concepts/1-project/_category_.json new file mode 100644 index 00000000..b62ac774 --- /dev/null +++ b/docs/kusion/3-concepts/1-project/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Projects" +} diff --git a/docs/kusion/3-concepts/2-stack/1-overview.md b/docs/kusion/3-concepts/2-stack/1-overview.md new file mode 100644 index 00000000..c6dcd2b5 --- /dev/null +++ b/docs/kusion/3-concepts/2-stack/1-overview.md @@ -0,0 +1,16 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview +
+A stack in Kusion is defined as a folder within the project directory that contains a `stack.yaml` file. Stacks provide a mechanism to isolate multiple sets of different configurations in the same project. A stack is also the smallest unit of operation that can be configured and deployed independently.
+ +The most common way to leverage stacks is to denote different phases of the software development lifecycle, such as `development`, `staging`, `production`, etc. For instance, in the case where the image and resource requirements for an application workload might differ across different phases in the SDLC, they can be represented by different stacks in the same project, namely `dev`, `stage` and `prod`.
+ +To distinguish it from the deploy-time concept of a "target environment" (which Kusion defines as `workspaces`), **stack** is a development-time concept for application developers to manage different configurations. One way to illustrate the difference is that you can easily deploy the `prod` stack to multiple target environments, for example, `aws-prod-us-east`, `aws-prod-us-east-2` and `azure-prod-westus`.
+ +## High Level Schema +
+![High_Level_Schema](/img/docs/user_docs/concepts/high-level-schema.png) \ No newline at end of file diff --git a/docs/kusion/3-concepts/2-stack/2-configuration.md b/docs/kusion/3-concepts/2-stack/2-configuration.md new file mode 100644 index 00000000..b09a5c43 --- /dev/null +++ b/docs/kusion/3-concepts/2-stack/2-configuration.md @@ -0,0 +1,38 @@ +--- +id: configuration +sidebar_label: Stack file reference +--- + +# Kusion stack file reference +
+Every stack in a Kusion project has a stack file, `stack.yaml`, which specifies metadata about your stack, such as the stack name and stack description. The stack file must begin with lowercase `stack` and have an extension of either `.yaml` or `.yml`.
+ +## Attributes +
+| Name | Required | Description | Options | |:------------- |:--------------- |:------------- |:------------- | | `name` | required | Name of the stack, containing alphanumeric characters, hyphens, and underscores. | None | | `description` | optional | A brief description of the stack. | None | | `extensions` | optional | List of extensions on the stack. | [See below](#extensions) |
+ +### Extensions +
+Extensions allow you to customize how resources are generated as part of a release.
+ +#### kubernetesNamespace +
+The Kubernetes namespace extension allows you to customize the namespace in which your application's Kubernetes resources are generated.
+ +| Key | Required | Description | Example | |:------|:--------:|:-------------|:---------| | kind | y | The kind of extension being used. Must be 'kubernetesNamespace'. | `kubernetesNamespace` | | namespace | y | The namespace in which all application-scoped Kubernetes objects are generated.
| `default` | + +```yaml +# Example `stack.yaml` file with customized namespace of `test`. +name: dev +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` diff --git a/docs/kusion/3-concepts/2-stack/_category_.json b/docs/kusion/3-concepts/2-stack/_category_.json new file mode 100644 index 00000000..914c863f --- /dev/null +++ b/docs/kusion/3-concepts/2-stack/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Stacks" +} diff --git a/docs/kusion/3-concepts/3-module/1-overview.md b/docs/kusion/3-concepts/3-module/1-overview.md new file mode 100644 index 00000000..b6487117 --- /dev/null +++ b/docs/kusion/3-concepts/3-module/1-overview.md @@ -0,0 +1,16 @@ +# Overview + +A Kusion module is a reusable building block designed by platform engineers to standardize application deployments and enable app developers to self-service. It consists of two parts: + +- App developer-oriented schema: It is a [KCL schema](https://kcl-lang.io/docs/user_docs/guides/schema-definition/). Fields in this schema are recommended to be understandable to application developers and workspace-agnostic. For example, a database Kusion module schema only contains fields like database engine type and database version. +- Kusion module generator: It is a piece of logic that generates the Intent with an instantiated schema mentioned above, along with platform configurations ([workspace](../workspace)). As a building block, Kusion module hides the complexity of infrastructures. A database Kusion module not only represents a cloud RDS, but it also contains logic to configure other resources such as security groups and IAM policies. Additionally, it seamlessly injects the database host address, username, and password into the workload's environment variables. The generator logic can be very complex in some situations so we recommend implementing it in a GPL like [go](https://go.dev/). + +Here are some explanations of the Kusion Module: + +1. It represents an independent unit that provides a specific capability to the application with clear business semantics. +2. It consists of one or multiple infrastructure resources (K8s/Terraform resources), but it is not merely a collection of unrelated resources. For instance, a database, monitoring capabilities, and network access are typical Kusion Modules. +3. Modules should not have dependencies or be nested within each other. +4. AppConfig is not a Module. +5. Kusion Module is a superset of [KPM](https://www.kcl-lang.io/docs/user_docs/guides/package-management/quick-start). It leverages the KPM to manage KCL schemas in the Kusion module. + +![kusion-module](/img/docs/concept/kusion-module.png) \ No newline at end of file diff --git a/docs/kusion/3-concepts/3-module/2-develop-guide.md b/docs/kusion/3-concepts/3-module/2-develop-guide.md new file mode 100644 index 00000000..e4a076e9 --- /dev/null +++ b/docs/kusion/3-concepts/3-module/2-develop-guide.md @@ -0,0 +1,256 @@ +# Platform Engineer Develop Guide + +## Prerequisites + +To follow this guide, you will need: + +- Go 1.22 or higher installed and configured +- Kusion v0.12 or higher installed locally + +## Workflow + +As a platform engineer, the workflow of developing a Kusion module looks like this: + +1. Communicate with app developers and identify the fields that should exposed to them in the dev-orient schema +2. Identify module input parameters that should be configured by platform engineers in the [workspace](../workspace) +3. Define the app dev-orient schema +4. 
Develop the module by implementing gRPC interfaces + +The first two steps primarily involve communication with the application development team, and the specific details are not included in this tutorial. This tutorial begins with the subsequent two steps. + +## Set up a developing environment + +Developing a Kusion module includes defining a KCL schema and developing a module binary in golang. We will provide a scaffold repository and a new command `kusion mod init` to help developers set up the developing environment easily. + +After executing the command + +```shell +kusion mod init +``` + +Kusion will download a [scaffold repository](https://github.com/KusionStack/kusion-module-scaffolding) and rename this project with your module name. The scaffold contains code templates and all files needed for developing a Kusion module. + +## Developing + +The scaffold repository directory structure is shown below: + +```shell +$ tree kawesome/ +. +├── example +│   ├── dev +│   │   ├── example_workspace.yaml +│   │   ├── kcl.mod +│   │   ├── main.k +│   │   └── stack.yaml +│   └── project.yaml +├── kawesome.k +├── kcl.mod +└── src + ├── Makefile + ├── go.mod + ├── go.sum + ├── kawesome_generator.go + └── kawesome_generator_test.go +``` + +When developing a Kusion module with the scaffold repository, you could follow the steps below: + +1. **Define the module name and version** + - For go files. Rename the module name in the `go.mod` and related files to your Kusion module name. + ```yaml + module kawsome + go 1.22 + require ( + ... + ) + ``` + - For KCL files. Rename package name and version in the `kcl.mod` + ```toml + [package] + name = "kawesome" + version = 0.2.0 + ``` + + We assume the module named is `kawesome` and the version is `0.2.0` in this guide. + +2. **Define the dev-orient schemas**. They would be initialized by app developers. In this scaffold repository, we've defined a schema named Kawesome in `kawesome.k` that consists of two resources `Service` and `RandomPassword` and they will be generated into a Kubernetes Service and a Terraform RandomPassword later. + +```python +schema Kawesome: +""" Kawesome is a sample module schema consisting of Service +and RandomPassword + +Attributes +---------- +service: Service, default is Undefined, required. + The exposed port of Workload, which will be generated into Kubernetes Service. +randomPassword: RandomPassword, default is Undefined, required. + The sensitive random string, which will be generated into Terraform random_password. + +Examples +-------- +import kawesome as ks + +... ... + +accessories: { + "kawesome": kawesome.Kawesome { + service: kawesome.Service{ + port: 5678 + } + randomPassword: kawesome.RandomPassword { + length: 20 + } + } +} +""" + +# The exposed port of Workload, which will be generated into Kubernetes Service. +service: Service + +# The sensitive random string, which will be generated into Terraform random_password. +randomPassword: RandomPassword +``` + +3. **Implement the [gRPC proto](https://github.com/KusionStack/kusion/blob/main/pkg/modules/proto/module.proto) generate interface.** The `generate` interface consumes the application developer's config described in the [`AppConfiguration`](../app-configuration) and the platform engineer's config described in the [`workspace`](../workspace) to generate all infrastructure resources represented by this module. 
+ +```go +func (k *Kawesome) Generate(_ context.Context, request *module.GeneratorRequest) (*module.GeneratorResponse, error) { + // generate your infrastructure resources +} + +// Patcher primarily contains patches for fields associated with Workloads, and additionally offers the capability to patch other resources. +type Patcher struct { + // Environments represent the environment variables patched to all containers in the workload. + Environments []v1.EnvVar `json:"environments,omitempty" yaml:"environments,omitempty"` + // Labels represent the labels patched to the workload. + Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` + // PodLabels represent the labels patched to the pods. + PodLabels map[string]string `json:"podLabels,omitempty" yaml:"podLabels,omitempty"` + // Annotations represent the annotations patched to the workload. + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + // PodAnnotations represent the annotations patched to the pods. + PodAnnotations map[string]string `json:"podAnnotations,omitempty" yaml:"podAnnotations,omitempty"` + // JSONPatchers represents patchers that can be patched to an arbitrary resource. + // The key of this map represents the ResourceId of the resource to be patched. + JSONPatchers map[string]JSONPatcher `json:"jsonPatcher,omitempty" yaml:"jsonPatcher,omitempty"` +} +``` +
+The `GeneratorRequest` contains the application developer's config, the platform engineer's config, the workload config, and related metadata that a module may need to generate infrastructure resources. +In the `GeneratorResponse`, there are two fields, `Resources` and `Patchers`. The `Resources` field represents the resources that should be operated by Kusion; they will be appended into the [Spec](../spec). The `Patchers` are used to patch the workload and other resources.
+ +### Workload +
+Workload in the AppConfiguration is also a Kusion module. If the workload module only generates one resource, this resource will be regarded as the workload resource. However, if the workload module generates more than one resource, one and only one of them must contain a key-value pair in the 'extension' field, where the key is 'kusion.io/is-workload' and the value is 'true'; this resource will be regarded as the workload resource.
+ +### Implicit Resource Dependency +
+When you need to use an attribute of another resource as the value of a specific resource attribute, Kusion supports declaring implicit resource dependencies with the `$kusion_path` prefix. You can concatenate the implicit resource dependency path from the resource `id`, the attribute `name` and the `$kusion_path` prefix, for example:
+ +```yaml +# Dependency path as an attribute value. +spec: + resources: + - id: v1:Service:test-ns:test-service + type: Kubernetes + attributes: + metadata: + annotations: + deployment-name: $kusion_path.v1:Deployment:test-ns:test-deployment.metadata.name +``` + +In addition, please note that: +
+- The implicit resource dependency path can only be used to replace the value in the `Attributes` field of the `Resource`, but not the key. For example, the following `Spec` is **invalid**: + +```yaml +# Dependency path not in `attributes`. +spec: + resources: + - id: v1:Service:test:$kusion_path.apps/v1:Deployment:test-ns:test-deployment.metadata.name +``` + +```yaml +# Dependency path in the key, but not in the value.
+spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + $kusion_path.v1:Service:test-ns:test-service.metadata.name: test-svc +``` +
+- The implicit resource dependency path can only be used as a standalone value and cannot be combined with other strings. For example, the following `Spec` is **invalid**: + +```yaml +# Dependency path combined with other strings. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + test-svc: $kusion_path.v1:Service:test-ns:test-service.metadata.name + "-test" +``` +
+- The implicit resource dependency path does not support accessing the value in an array, so the following is currently **invalid**: + +```yaml +# Dependency path accessing the value in an array. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + test-svc: $kusion_path.v1:Service:test-ns:test-service.spec.ports[0].name +``` + +## Publish +
+Publish the Kusion module to an OCI registry with the command `kusion mod push`. If your module is open to the public, we **welcome and highly encourage** you to contribute it to the module registry [catalog](https://github.com/KusionStack/catalog), so that more people can benefit from the module. Submit a pull request to this repository; once it is merged, the module will be published to the [KusionStack GitHub container registry](https://github.com/orgs/KusionStack/packages).
+ +Publish a stable version: +```shell +kusion mod push /path/to/my-module oci:/// --creds +``` +
+Publish a module for a specific OS architecture: +```shell +kusion mod push /path/to/my-module oci:/// --os-arch=darwin/arm64 --creds +``` +
+Publish a pre-release version: +```shell +kusion mod push /path/to/my-module oci:/// --latest=false --creds +``` + +:::info +The OCI URL format is `oci:///`; please ensure that your token has permissions to write to the registry. +::: +
+More details can be found in the `kusion mod push` reference doc.
+ +## Register to the workspace + +```yaml +modules: + kawesome: + path: oci://ghcr.io/kusionstack/kawesome + version: 0.2.0 + configs: + default: + service: + labels: + kusionstack.io/module-name: kawesome + annotations: + kusionstack.io/module-version: 0.2.0 +``` +
+Register the module's platform configuration in `workspace.yaml` to standardize the module's behavior. App developers can list all available modules registered in the workspace. \ No newline at end of file diff --git a/docs/kusion/3-concepts/3-module/3-app-dev-guide.md b/docs/kusion/3-concepts/3-module/3-app-dev-guide.md new file mode 100644 index 00000000..3169c67c --- /dev/null +++ b/docs/kusion/3-concepts/3-module/3-app-dev-guide.md @@ -0,0 +1,127 @@ +# Application Developer User Guide + +## Prerequisites + +To follow this guide, you will need: + +- Kusion v0.12 or higher installed locally + +## Workflow + +As an application developer, the workflow of using a Kusion module looks like this: + +1. Browse available modules registered by platform engineers in the workspace +2. Add modules you need to your Stack +3. Initialize modules +4.
Apply the AppConfiguration + +## Browse available modules + +For all KusionStack built-in modules, you can find all available modules and documents in the [reference](../../6-reference/2-modules/index.md) + +Since the platform engineers have already registered the available modules in the workspace, app developers can execute `kusion mod list` to list the available modules. + +```shell +kusion mod list --workspace dev + +Name Version URL +kawesome 0.2.0 oci://ghcr.io/kusionstack/kawesome +``` + +## Add modules to your Stack + +Taking `kawesome` as an example, the directory structure is shown below: + +```shell +example +├── dev +│   ├── example_workspace.yaml +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +Select the module you need from the result of `kusion mod list` and execute `kusion mod add kawesome` to add `kawesome` into your Stack. + +Once you have added the `kawesome` module, the `kcl.mod` file will be updated to look like this. + +``` toml +[package] +name = "example" + +[dependencies] +kawesome = { oci = "oci://ghcr.io/kusionstack/kawesome", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +- The `kam` dependency represents the [Kusion Application Module](https://github.com/KusionStack/kam.git) which contains the AppConfiguration. +- The `service` dependency represents the service workload module. +- The `kawesome` is the Kusion module we are going to use in the AppConfiguration. + +## Initialize modules + +```python +# The configuration codes in perspective of developers. +import kam.v1.app_configuration as ac +import service +import service.container as c +import kawesome.v1.kawesome + +kawesome: ac.AppConfiguration { + # Declare the workload configurations. + workload: service.Service { + containers: { + kawesome: c.Container { + image: "hashicorp/http-echo" + env: { + "ECHO_TEXT": "$(KUSION_KAWESOME_RANDOM_PASSWORD)" + } + } + } + replicas: 1 + } + # Declare the kawesome module configurations. + accessories: { + "kawesome": kawesome.Kawesome { + service: kawesome.Service{ + port: 5678 + } + randomPassword: kawesome.RandomPassword { + length: 20 + } + } + } +} +``` + +Initialize the `kawesome` module in the `accessories` block of the AppConfiguration. The key of the `accessories` item represents the module name and the value represents the actual module you required. + +## Apply the result + +Execute the preview command to validate the result. + +```shell +kusion apply + ✔︎ Generating Spec in the Stack dev... 
+Stack: dev +ID Action +hashicorp:random:random_password:example-dev-kawesome Create +v1:Namespace:example Create +v1:Service:example:example-dev-kawesome Create +apps/v1:Deployment:example:example-dev-kawesome Create + + +Do you want to apply these diffs?: + > details +Which diff detail do you want to see?: +> all + hashicorp:random:random_password:example-dev-kawesome Create + v1:Namespace:example Create + v1:Service:example:example-dev-kawesome Create + apps/v1:Deployment:example:example-dev-kawesome Create +``` \ No newline at end of file diff --git a/docs/kusion/3-concepts/3-module/_category_.json b/docs/kusion/3-concepts/3-module/_category_.json new file mode 100644 index 00000000..5952a21e --- /dev/null +++ b/docs/kusion/3-concepts/3-module/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Modules" +} diff --git a/docs/kusion/3-concepts/4-workspace.md b/docs/kusion/3-concepts/4-workspace.md new file mode 100644 index 00000000..daedd84f --- /dev/null +++ b/docs/kusion/3-concepts/4-workspace.md @@ -0,0 +1,222 @@ +--- +id: workspace +sidebar_label: Workspace +--- + +# Workspace +
+Workspace is a logical concept that maps to an actual target environment to deploy a stack to. In today's context, this _usually_ represents a Kubernetes cluster for the application workload and an optional cloud account to provision infrastructure resources that the workload depends on (a database, for example). Aside from the deployment destination, workspaces are also designed to be associated with a series of platform configurations that are used by all the stacks deployed to said workspace.
+ +When executing the command `kusion generate`, Kusion will "match" the AppConfiguration and the appropriate workspace configuration to dynamically generate the `Spec`, which contains the complete manifest to describe the resources in the stack. The relationship of the Project, Stack and Workspace is shown below. Notice that all three ways to organize stacks are valid.
+ +![project-stack-workspace](/img/docs/concept/project-stack-workspace.png) +
+Workspace is designed to address separation of concerns. As opposed to the development-time concept of a "stack", a workspace is a deploy-time concept that represents a deployment target, a.k.a. an actual runtime environment. Workspaces should be entirely managed by Platform Engineers to alleviate the burden on developers to understand environment-specific details.
+ +To dig a little deeper, a workspace represents the need for a distinct set of "platform opinions". That includes things that application developers either don't want to or shouldn't need to worry about, such as which Kubernetes cluster to deploy to, the credentials to deploy to said clusters, and other infrastructure details like what database instance to provision.
+ +Workspaces are intended to be flexible, so you can map them as you see fit to the boundaries that are relevant to your use case. For example, you can map a workspace to a cloud region (aws-us-east-1), provided that regional isolation is sufficient for you (this is an extreme case). Alternatively, a workspace can be mapped to the combination of a cloud region and an SDLC phase (aws-dev-us-east-1), provided that it represents the right boundary from a platform perspective.
+ +The workspace configuration is in a deterministic format and currently written in YAML. The subcommands of `kusion workspace` are provided to manage the workspaces. When using `kusion workspace`, the workspace configuration will be saved as a YAML file in the local file system.
To avoid the possible risks, the environment variables are provided to hold the sensitive data such as Access Keys and Secret keys. + +## Workspace Configuration + +The configuration of a Workspace is stored in a single YAML file, which consists of `modules`, `secretStore`, and `context`. An example of Workspace configuration is shown as below. + +```yaml +# The platform configuration for Modules or KAMs. +# For each Module or KAM, the configuration format is as below. +# # ${module_identifier} or ${KAM_name}: +# # path: oci://ghcr.io/kusionstack/module-name # url of the module artifact +# # version: 0.2.0 # version of the module +# # configs: +# # default: # default configuration, applied to all projects +# # ${field1}: ${value1} +# # #{field2}: ${value2} +# # ... +# # ${patcher_name}: #patcher configuration, applied to the projects assigned in projectSelector +# # ${field1}: ${value1_override} +# # ... +# # projectSelector: +# # - ${project1_name} +# # - ${project2_name} +# # ... +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: ${mysql_subnet_id} + databaseName: kusion + largeSize: + size: 50 + projectSelector: + - foo + - bar + importDBInstance: + importedResources: + "aliyun:alicloud:alicloud_db_instance:wordpress-demo": "your-imported-resource-id" + projectSelector: + - baz + +secretStore: + provider: + aws: + region: us-east-1 + profile: The optional profile to be used to interact with AWS Secrets Manager. + +context: + KUBECONFIG_PATH: $HOME/.kube/config + AWS_ACCESS_KEY_ID: ref://secrets-manager-name/key-for-ak + AWS_SECRET_ACCESS_KEY: ref://secrets-manager-name/key-for-sk +``` + +### modules + +The `modules` are the platform-part configurations of Modules and KAMs, where the identifier of them are `${namespace}/${module_name}@${module_tag}` and `${kam_name}`. For each Module or KAM configuration, it is composed of a `default` and several `patcher` blocks. The `default` block contains the universal configuration of the Workspace, and can be applied to all Stacks in the Workspace, which is composed of the values of the Module's or KAM's fields. The `patcher` block contains the exclusive configuration for certain Stacks, which includes not only the fields' values, but also the applied Projects. + +The `patcher` block is designed to increase the flexibility for platform engineers managing Workspaces. Cause the Workspace should map to the real physical environment, in the actual production practice, it's almost impossible that all the Stacks share the same platform configuration, although we want them the same. + +The values of the same fields in `patcher` will override the `default`, and one field in multiple patchers is forbidden to assign to the same Project. That is, if there are more than one `patcher` declaring the same field with different values, the applied Projects are prohibited to overlap. And, The name of `patcher` must not be `default`. + +In the `patcher`, the applied Projects are assigned by the field `ProjectSelector`, which is an array of the Project names. The `ProjectSelector` is provided rather than something may like `StackSelector`, which specifies the applied Stacks. Here are the reasons. Explaining from the perspective of using Workspace, the mapping of Workspace and Stack is specified by the Kusion operation commands' users. 
Explaining from the perspective of the relationship among Project, Stack and Workspace, Workspace is designed for the reuse of platform-level configuration across multiple Projects. When a Project "encounters" a Workspace, it becomes a "Stack instance", which can be applied to a series of real resources. If something like `StackSelector` were used, this reuse could not be realized, and Workspace would also lose its relevance. For more information on the relationship, please refer to [Project](project/overview) and [Stack](stack/overview).
+ +Different Modules and KAMs have different names, fields, and corresponding formats and restrictions. When writing the configuration, check the corresponding Module's or KAM's description, and make sure all the requisite Modules and KAMs have been correctly configured. Please refer to [Kusion Module](module/overview) for more information. The example above gives a sample of the Module `mysql`.
+ +The `importedResources` block is designed to declare the import of existing cloud resources. `importedResources` is a `map` where you can declare the mapping from the `id` of the resource in the Kusion `Spec` to the Terraform ID of the resource to be imported. Kusion will automatically synchronize the state of the existing cloud resource for the Kusion resource.
+ +### secretStore +
+The `secretStore` field can be used to access the sensitive data stored in a cloud-based secrets manager. More details can be found [here](../5-user-guides/4-secrets-management/1-using-cloud-secrets.md).
+ +### context +
+The `context` field can be used to declare information such as the Kubernetes `kubeconfig` path or content, and the AK/SK of the Terraform providers. The configurable attributes are shown below.
+ +- `KUBECONFIG_PATH`: the local path of the `kubeConfig` file +- `KUBECONFIG_CONTENT`: the content of the `kubeConfig` file, can be used with cloud-based secrets management (e.g. `ref://secrets-management-name/secrets-key-for-kubeconfig`) +- `AWS_ACCESS_KEY_ID`: the access key ID of the AWS provider +- `AWS_SECRET_ACCESS_KEY`: the secret key of the AWS provider +- `ALICLOUD_ACCESS_KEY`: the access key ID of the Alicloud provider +- `ALICLOUD_SECRET_KEY`: the secret key of the Alicloud provider + +## Managing Workspace +
+The subcommands of `kusion workspace` are used to manage Workspaces, including `create`, `show`, `list`, `switch`, `update` and `delete`. Because the Workspace configurations are stored persistently, the current or a specified Backend will be used. For more information about Backends, please refer to [Backend](backend).
+ +Kusion will automatically create a `default` Workspace with an empty configuration in every Backend and set it as the current one. When first using Kusion, or when no Workspace is configured, the `default` Workspace will be used.
+ +### Creating Workspace +
+Use `kusion workspace create ${name} -f ${configuration_file_path}` to create a new Workspace with the configuration in a YAML file. The Workspace is identified by its `name`, which must be new, and the configuration must be written in a correctly formatted YAML file.
+ +The command above will create the Workspace in the current Backend. To create a Workspace in another Backend, use the `--backend` flag to specify it. Workspace names within a Backend must be unique, but the same name may be used in different Backends.
+ +In some scenarios, a newly created Workspace is expected to become the current one. For convenience, the `--current` flag is provided to set the Workspace as the current one upon creation.
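For reference, a minimal sketch of what the configuration file passed via `-f` (for example, `dev.yaml`) might contain is shown below; the `mysql` module, its field values, and the kubeconfig path are illustrative and should be replaced with the modules and settings actually used in your environment.

```yaml
# dev.yaml: a minimal workspace configuration sketch.
modules:
  mysql:
    path: oci://ghcr.io/kusionstack/mysql    # module artifact URL (illustrative)
    version: 0.2.0
    configs:
      default:                               # applied to all projects in this workspace
        cloud: alicloud
        size: 20
context:
  KUBECONFIG_PATH: $HOME/.kube/config        # cluster this workspace deploys to
```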
+ +Note that creating the `default` Workspace is not allowed, because it is created by Kusion automatically.
+ +Examples are shown below.
+ +```shell +# create a workspace in the current backend +kusion workspace create dev -f dev.yaml + +# create a workspace in the current backend and set it as current +kusion workspace create dev -f dev.yaml --current + +# create a workspace in a specified backend +kusion workspace create dev -f dev.yaml --backend oss-pre +``` +
+Which Workspaces to create is decided by the platform engineers. We recommend organizing them by the following rules:
+ +- **SDLC phases**, such as `dev`, `pre`, `prod`; +- **cloud vendors**, such as `aws`, `alicloud`; +- a combination of the two above, such as `dev-aws`, `prod-alicloud`.
+ +By design, Kusion does not support deploying a Stack to multiple clouds or regions within a single Workspace. While users can technically define a Module that provisions resources across multiple clouds or regions, Kusion does not recommend this practice and will not provide technical support for such configurations. If platform engineers need to manage resources across multiple clouds or regions, they should create separate Workspaces.
+ +### Listing Workspace +
+Use `kusion workspace list` to get all the workspace names.
+ +An example is shown below. For simplicity, the following examples do not use a specified backend, which is supported via the `--backend` flag.
+ +```shell +# list all the workspace names +kusion workspace list +``` + +### Switching Workspace +
+To avoid specifying the Workspace name for each Kusion operation command, `kusion workspace switch ${name}` is provided to switch the current Workspace. Then, when executing `kusion generate`, the current Workspace will be used. The target Workspace must already exist.
+ +An example is shown below.
+ +```shell +# switch workspace +kusion workspace switch dev +``` + +### Showing Workspace +
+Use `kusion workspace show ${name}` to get the Workspace configuration. If the `name` is not specified, the configuration of the current Workspace will be returned.
+ +Examples are shown below.
+ +```shell +# show a specified workspace configuration +kusion workspace show dev + +# show the current workspace configuration +kusion workspace show +``` + +### Updating Workspace +
+When the Workspace needs to be updated, use `kusion workspace update ${name} -f ${configuration_file_path}` to update it with a new configuration file. The whole updated configuration must be provided, and the Workspace must already exist. The recommended steps are to get the Workspace configuration first, refresh the configuration, and then execute the command. If the `name` is not specified, the current Workspace will be used.
+ +Updating the `default` Workspace is allowed, and the `--current` flag is also supported to set it as the current one.
+ +Examples are shown below.
+ +```shell +# update a specified workspace +kusion workspace update dev -f dev_new.yaml + +# update a specified workspace and set it as current +kusion workspace update dev -f dev_new.yaml --current + +# update the current workspace +kusion workspace update -f dev_new.yaml +``` + +### Deleting Workspace +
+When a Workspace is no longer in use, use `kusion workspace delete ${name}` to delete it. If the `name` is not specified, the current Workspace will be deleted, and the `default` Workspace will be set as the current Workspace. Therefore, deleting the `default` Workspace is not allowed.
+ +The example is shown as below. + +```shell +# delete a specified workspace +kusion workspace delete dev + +# delete the current workspace +kusion workspace delete +``` + +## Using Workspace + +Workspace is used in the command `kusion generate`, the following steps help smooth the operation process. + +1. Write the Workspace configuration file with the format shown above, and fulfill all the necessary fields; +2. Create the workspace with `kusion workspace create`, then Kusion perceives the Workspace. The flag `--current` can be used to set it as the current. +3. Execute `kusion generate` in a Stack to generate the whole Spec, the AppConfiguration and Workspace configuration get rendered automatically, and can be applied to the real infrastructure. If the appointed Workspace or Backend is asked, the flags `--workspace` and `--backend` will help achieve that. +4. If the Workspace needs to update, delete, switch, etc. Use the above commands to achieve that. diff --git a/docs/kusion/3-concepts/5-appconfiguration.md b/docs/kusion/3-concepts/5-appconfiguration.md new file mode 100644 index 00000000..570d1ac0 --- /dev/null +++ b/docs/kusion/3-concepts/5-appconfiguration.md @@ -0,0 +1,38 @@ +--- +id: app-configuration +sidebar_label: AppConfiguration +--- + +# AppConfiguration + +As a modern cloud-native application delivery toolchain, declarative intent-based actuation is the central idea of Kusion, and `AppConfiguration` model plays the role of describing the intent, which provides a simpler path for on-boarding developers to the platform without leaking low-level details in runtime infrastructure and allows developers to fully focus on the application logic itself. + +The `AppConfiguration` model consolidates workload and their dependent accessories for the application deployment, along with any pipeline and operational requirements into one standardized, infrastructure-independent declarative specification. This declarative specification represents the intuitive user intent for the application, which drives a standardized and efficient application delivery and operation process in a hybrid environment. + +![appconfig.png](/img/docs/concept/appconfig.png) + +AppConfiguration consists of four core concepts, namely `Workload`, `Accessory`, `Pipeline`, and `Dependency`. Each of them represents a [Kusion module](./3-module/1-overview.md). We will walk through these concepts one by one. + +#### Workload + +Workload is a representation of the business logic that runs in the cluster. Common workload types include long-running services that should “never” go down and batch jobs that take from a few seconds to a few days to complete. + +In most cases, a Workload is a backend service or the frontend of an Application. For example, in a micro-service architecture, each service would be represented by a distinct Workload. This allows developers to manage and deploy their code in a more organized and efficient manner. + +#### Accessory + +Using the analogy of a car, workload is the core engine of the application, but only having the engine isn’t enough for the application to function properly. In most cases, there must be other supporting parts for the workload to operate as intended. For those supporting parts, we call them Accessory. Accessory refers to various runtime capabilities and operational requirements provided by the underlying infrastructure, such as database, network load-balancer, storage and so on. 
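To give a feel for how an accessory is declared, the sketch below attaches a hypothetical database accessory to an application in `AppConfiguration`; the `mysql` module, its import path and its fields are illustrative placeholders, and the exact schema depends on the Kusion modules registered in the workspace.

```python
import kam.v1.app_configuration as ac
import service
import service.container as c
import mysql

myapp: ac.AppConfiguration {
    # The workload is the core engine of the application.
    workload: service.Service {
        containers: {
            myapp: c.Container {
                image: "myapp:latest"
            }
        }
        replicas: 1
    }
    # Accessories are supporting capabilities, keyed by module name.
    accessories: {
        "mysql": mysql.MySQL {
            version: "8.0"
        }
    }
}
```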
+ +From the perspective of team collaboration, the platform team should be responsible for creating and maintaining various accessory definitions, providing reusable building blocks out-of-the-box. Application developers just need to leverage the existing accessories to cover the evolving application needs. This helps software organizations achieve separation of concern so that different roles can focus on the subject matter they are an expert in. + +#### Pipeline + +Running reliable applications requires reliable delivery pipelines. By default, Kusion provides a relatively fixed built-in application delivery pipeline, which should be sufficient for most use cases. However, as the application scale and complexity grow, so does the need for a customizable delivery pipeline. Developers wish for more fine-tuned control and customization over the workflow to deliver their applications. That’s why we introduced the Pipeline section in AppConfiguration model. + +A customized delivery pipeline is made of several steps, each corresponding to an operation that needs to be executed, such as running certain tests after a deployment, scanning artifacts for vulnerabilities prior to deployment, and so on. Implementation-wise, the execution of each step should be carried out in the form of a plugin, developed and managed by the platform owners. + +#### Topologies + +Application dependencies refer to the external services or other software that an application relies on to function properly. These dependencies may be required to provide certain functionality or to use certain features in the application. + +Similar to declaring a dependency from an application to an accessory, AppConfiguration lets you declare the dependencies between different applications in the same way. diff --git a/docs/kusion/3-concepts/6-spec.md b/docs/kusion/3-concepts/6-spec.md new file mode 100644 index 00000000..0c3de9d4 --- /dev/null +++ b/docs/kusion/3-concepts/6-spec.md @@ -0,0 +1,123 @@ +--- +id: spec +sidebar_label: Spec +--- + +# Spec + +The Spec represents the operational intentions that you aim to deliver using Kusion. These intentions are expected to contain all components throughout the DevOps lifecycle, including resources (workload, database, load balancer, etc.), dependencies, and policies. The Kusion module generators are responsible for converting all AppConfigurations and environment configurations into the Spec. Once the Spec is generated, the Kusion Engine takes charge of updating the actual infrastructures to match the Spec. + +## Purpose + +### Single Source of Truth + +In Kusion's workflow, the platform engineer builds Kusion modules and provides environment configurations, application developers choose Kusion modules they need and deploy operational intentions to an environment with related environment configurations. They can also input dynamic parameters like the container image when executing the `kusion generate` command. So the final operational intentions include configurations written by application developers, environment configurations and dynamic inputs. Due to this reason, we introduce **Spec** to represent the SSoT(Single Source of Truth) of Kusion. It is the result of `kusion generate` which contains all operational intentions from different sources. + +### Consistency + +Delivering an application to different environments with identical configurations is a common practice, especially for applications that require scalable distribution. 
In such cases, an immutable configuration package is helpful. By utilizing the Spec, all configurations and changes are stored in a single file. As the Spec is the input of Kusion, it ensures consistency across different environments whenever you execute Kusion with the same Spec file. + +### Rollback and Disaster Recovery + +The ability to roll back is crucial in reducing incident duration. Rolling back the system to a previously validated version is much faster compared to attempting to fix it during an outage. We regard a validated Spec as a snapshot of the system and recommend storing the Spec in a version control system like Git. This enables better change management practices and makes it simpler to roll back to previous versions if needed. In case of a failure or outage, having a validated Spec simplifies the rollback process, ensuring that the system can be quickly recovered. + +## Example + +The API definition of the `Spec` structure in Kusion can be found [here](https://github.com/KusionStack/kusion/blob/main/pkg/apis/api.kusion.io/v1/types.go#L862). Below is an example `Spec` file generated from the `quickstart` demo application (more details can be found [here](../2-getting-started/2-deliver-quickstart.md)). + +```yaml +resources: + - id: v1:Namespace:quickstart + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: quickstart + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:quickstart:quickstart-default-quickstart + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + name: quickstart-default-quickstart + namespace: quickstart + spec: + selector: + matchLabels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + spec: + containers: + - image: kusionstack/kusion-quickstart:latest + name: quickstart + resources: {} + status: {} + dependsOn: + - v1:Namespace:quickstart + - v1:Service:quickstart:quickstart-default-quickstart-private + extensions: + GVK: apps/v1, Kind=Deployment + - id: v1:Service:quickstart:quickstart-default-quickstart-private + type: Kubernetes + attributes: + apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + name: quickstart-default-quickstart-private + namespace: quickstart + spec: + ports: + - name: quickstart-default-quickstart-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + type: ClusterIP + status: + loadBalancer: {} + dependsOn: + - v1:Namespace:quickstart + extensions: + GVK: /v1, Kind=Service +secretStore: null +context: {} +``` + +From the example above, we can see that the `Spec` contains a list of `resources` required by the application. + +A `resource` is a concept in `Kusion` that abstract infrastructure. It represents an individual unit of infrastructure or application component managed by the `Kusion`, serving as a fundamental building block for defining the desired state of the infrastructure. They provide a unified way to define various types of resources, including `Kubernetes` objects and `Terraform` resources. 
Each `resource` in the `Spec` needs to have `id`, `type`, `attributes`, `dependsOn`, and `extensions` fields: + +- `id` is the unique key of this resource. An idiomatic way for `Kubernetes` resources is `apiVersion:kind:namespace:name`, and for `Terraform` resources is `providerNamespace:providerName:resourceType:resourceName`. +- `type` represents the type of runtime Kusion supports, and currently includes `Kubernetes` and `Terraform`. +- `attributes` represents all specified attributes of this resource, basically the manifest and argument attributes for the `Kubernetes` and `Terraform` resources. +- `dependsOn` contains all the other resources the resource depends on. +- `extensions` specifies the arbitrary metadata of the resource, where you can declare information such as Kubernetes GVK, Terraform provider, and imported resource id, etc. + +Besides the `resources`, Spec also records the `secretStore` and `context` field in the corresponding workspace. The former can be used to access sensitive data stored in an external secrets manager, while the latter can be used to declare the workspace-level configurations such as Kubernetes `kubeconfig` file path or content, and Terraform providers' AK/SK. More information can be found [here](4-workspace.md#secretstore). + +## Apply with Spec File + +Kusion supports using the Spec file directly as input. Users can place the Spec file in the stack directory and execute `kusion preview --spec-file spec.yaml` and `kusion apply --spec-file spec.yaml` to preview and apply the resources. diff --git a/docs/kusion/3-concepts/7-backend.md b/docs/kusion/3-concepts/7-backend.md new file mode 100644 index 00000000..5262f7ac --- /dev/null +++ b/docs/kusion/3-concepts/7-backend.md @@ -0,0 +1,228 @@ +--- +id: backend +sidebar_label: Backends +--- + +# Backend + +Backend is Kusion's storage, which defines the place to store Workspace and Release. By default, Kusion uses the `local` type of backend to store on the local disk. While in the scenario of team collaboration, the Workspace and Release can be stored on a remote backend, such as `oss` and `s3`, to allow multiple users' access. + +The command `kusion config` is used to configure the backend configuration. Configuring a whole backend or an individual config item are both supported. For the sensitive data, the environment variables are supported, and with higher priority. + +Furthermore, Kusion provides the operation of setting current backend. Thus, the trouble of specifying backend can be saved when executing operation commands and managing `workspace`. + +## Available Backend Types + +There are three available backend types: `local`, `oss`, `s3`. + +### local + +The `local` type backend uses local file system as storage, which is suitable for local operations, but not ideal for multi-user collaboration. The supported config items are as below. + +- **path**: `type string`, `optional`, specify the directory to store the Workspace and Release files. The subdirectories `workspaces` and `releases` are used to store the corresponding files separately. It's recommended to use an empty or a Kusion exclusive directory as the local backend path. If not set, the default path `${KUSION_HOME}` is in use. + +The whole local type backend configuration is as below. + +```yaml +{ + "type": "local", + "configs": { + "path": "${local_path}" # type string, optional, the directory to store files. + } +} +``` + +### oss + +The `oss` type backend uses the Alicloud Object Storage Service (OSS) as storage. 
The supported config items are as below.
+ +- **endpoint**: `type string`, `required`, specify the access endpoint for the alicloud oss bucket. +- **accessKeyID**: `type string`, `required`, specify the alicloud account accessKeyID, which can also be declared via the environment variable `OSS_ACCESS_KEY_ID`. +- **accessKeySecret**: `type string`, `required`, specify the alicloud account accessKeySecret, which can also be declared via the environment variable `OSS_ACCESS_KEY_SECRET`. +- **bucket**: `type string`, `required`, specify the name of the alicloud oss bucket. +- **prefix**: `type string`, `optional`, specify the prefix under which the Workspace and Release files are stored, i.e. `${prefix}/workspaces` and `${prefix}/releases` respectively. Using a prefix creates a "dedicated space" for the Kusion data, which is beneficial for the management and reuse of the bucket. If not set, there is no prefix and the files are stored under the root path of the bucket, by analogy to a file system.
+ +Note that `accessKeyID` and `accessKeySecret` are required in the combined configuration formed by the settings managed by the command `kusion config` and the environment variables; for `kusion config` alone, they are not obligatory. For safety reasons, using environment variables is the recommended way.
+ +The whole oss type backend configuration is as below.
+ +```yaml +{ + "type": "oss", + "configs": { + "endpoint": "${oss_endpoint}", # type string, required, the oss endpoint. + "accessKeyID": "${oss_access_key_id}", # type string, optional for the command "kusion config", the oss access key id. + "accessKeySecret": "${oss_access_key_secret}", # type string, optional for the command "kusion config", the oss access key secret. + "bucket": "${oss_bucket}", # type string, required, the oss bucket. + "prefix": "${oss_prefix}" # type string, optional, the prefix to store the files. + } +} +``` + +The supported environment variables are as below.
+ +```bash +export OSS_ACCESS_KEY_ID="${oss-access-key-id}" # configure accessKeyID +export OSS_ACCESS_KEY_SECRET="${oss-access-key-secret}" # configure accessKeySecret +``` + +### s3 +
+The `s3` type backend uses the AWS Simple Storage Service (S3) as storage. The supported config items are as below.
+ +- **region**: `type string`, `required`, specify the region of the aws s3 bucket, which can also be declared via the environment variable `AWS_DEFAULT_REGION` or `AWS_REGION`, where the latter has higher priority. +- **endpoint**: `type string`, `optional`, specify the access endpoint for the aws s3 bucket. +- **accessKeyID**: `type string`, `required`, specify the aws account accessKeyID, which can also be declared via the environment variable `AWS_ACCESS_KEY_ID`. +- **accessKeySecret**: `type string`, `required`, specify the aws account accessKeySecret, which can also be declared via the environment variable `AWS_SECRET_ACCESS_KEY`. +- **bucket**: `type string`, `required`, specify the name of the aws s3 bucket. +- **prefix**: `type string`, `optional`, specify the prefix under which the Workspace and Release files are stored, i.e. `${prefix}/workspaces` and `${prefix}/releases` respectively.
+ +Note that `region`, `accessKeyID` and `accessKeySecret` are optional for the `kusion config` command.
+ +The whole s3 type backend configuration is as below.
+ +```yaml +{ + "type": "s3", + "configs": { + "region": "${s3_region}", # type string, optional for the command "kusion config", the aws region. + "endpoint": "${s3_endpoint}", # type string, optional, the aws endpoint.
+ "accessKeyID": "${s3_access_key_id}", # type string, optional for the command "kusion config", the aws access key id. + "accessKeySecret": "${s3_access_key_secret}", # type string, optional for the command "kusion config", the aws access key secret. + "bucket": "${s3_bucket}", # type string, required, the s3 bucket. + "prefix": "${s3_prefix}" # type string, optional, the prefix to store the files. + } +} +``` + +The supported environment variables are as below. + +```bash +export AWS_DEFAULT_REGION="${s3_region}" # configure region, lower priority than AWS_REGION +export AWS_REGION="${s3_region}" # configure region, higher priority than AWS_DEFAULT_REGION +export AWS_ACCESS_KEY_ID="${s3_access_key_id}" # configure accessKeyID +export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}" # configure accessKeySecret +``` + + +## Setting a Backend + +When there is a new backend or the backend configuration needs to update, use the command `kusion config set ${key} ${value}` to set a backend. A backend is identified by a unique name, and its whole configuration is made up of the backend type and its corresponding config items. + +Be attention, do not confuse backend with backend type. For example, a backend named `s3_prod` uses `s3` as its storage, the `s3_prod` is the backend, while the `s3` is the backend type. + +There are four configuration modes: + +- setting a whole backend +- setting a backend type +- setting a whole set of backend config items +- setting a backend config item + +A unique backend name is required to do the configuration. Take `s3` type backend with name `s3_prod` for an example to explain how these modes work. + +### Setting a Whole Backend + +The key to configure a whole backend is `backends.${name}`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. Enclosing the value in single quotation marks is a good choice, which can keep the format correct. + +```shell +# set a whole backend +kusion config set backends.s3_prod '{"type":"s3","configs":{"bucket":"kusion"}}' +``` + +### Setting a Backend Type + +The key to set a backend type is `backends.${name}.type`, whose value must be `local`, `oss` or `s3`. + +```shell +# set a backend type +kusion config set backends.s3_prod.type s3 +``` + +### Setting a Whole Set of Backend Config Items + +The key to set a whole set of backend config items is `backends.${name}.configs`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. The backend config must be set after the backend type, and corresponds to the backend type. + +```shell +# set a whole backend config +kusion config set backends.s3_prod.configs '{"bucket":"kusion"}' +``` + +### Setting a Backend Config Item + +The key to set a backend config item is `backends.${name}.configs.${item}`. The item name and value type both depend on the backend type. Like the whole backend config, the config item must be valid and set after the backend type. + +```shell +# set a backend config item +kusion config set backends.s3_prod.configs.bucket kusion +``` + +When executing `kusion config set`, the configuration will be stored in a local file. For security reason, the environment variables are supported to configure some config items, such as `password`, `accessKeyID`, `accessKeySecret`. Using environment variables rather than `kusion config` set to set sensitive data is the best practice. If both configured, the environment variables have higher priority. 
For details about the supported environment variables, please see above.
+ +Kusion has a built-in backend named `default`, which is of the `local` type with the path `$KUSION_HOME`. The `default` backend cannot be modified, that is, setting or unsetting the default backend is not allowed. Besides, the keyword `current` is also used by Kusion itself; please do not use it as a backend name.
+ +## Unsetting a Backend +
+When a backend is not in use, or the configuration is out of date, use the command `kusion config unset ${key}` to unset a backend or a specified config item. As with setting, there are four modes of unsetting:
+ +- unsetting a whole backend +- unsetting a backend type +- unsetting a whole set of backend config items +- unsetting a backend config item + +When unsetting a whole backend, the backend must not be the current backend. When unsetting the backend type, the config items must be empty and the backend must not be the current one.
+ +Unsetting the `default` backend is forbidden.
+ +## Setting the Current Backend +
+To avoid specifying the backend for every operation command, Kusion provides the mechanism of setting a current backend; the current backend will then be used by default. This is very useful when you execute a series of Kusion operation commands, as they usually use the same backend.
+ +Use the command `kusion config set backends.current ${name}` to set the current backend, where `name` must be an already configured backend.
+ +```shell +# set the current backend +kusion config set backends.current s3_prod +``` + +Setting the current backend to `default` is legal. In fact, if there is no backend-related configuration, the current backend is the `default` backend.
+ +## Getting Backend Configuration +
+Use the command `kusion config get ${key}` to get a whole backend configuration or a specified backend config item. The `key` is the same as in the setting and unsetting operations; the whole list can be found in the [Configuration](configuration).
+ +```shell +# get a whole backend +kusion config get backends.s3_prod + +# get a specified config item +kusion config get backends.s3_prod.configs.bucket +``` + +Besides, the command `kusion config list` can also be used, which returns the whole Kusion configuration, including the backend configuration.
+ +```shell +# get the whole Kusion configuration +kusion config list +``` + +## Using Backend +
+The backend is used to store Workspaces and Releases. Thus, the following commands use the backend, as shown below.
+ +- subcommands of `kusion workspace`: used to store the Workspace; +- `kusion apply`, `kusion destroy`: used to store the Release; + +For all the commands above, the `--backend` flag is provided to specify the backend; otherwise, the current backend is used. When using a backend, you usually need to specify the sensitive data via environment variables. An example is shown below.
+
+```shell
+# set environment variables of sensitive and other necessary data
+export AWS_REGION="${s3_region}"
+export AWS_ACCESS_KEY_ID="${s3_access_key_id}"
+export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}"
+
+# use current backend
+kusion apply
+
+# use a specified backend
+kusion apply --backend s3_prod
+```
diff --git a/docs/kusion/3-concepts/8-configuration.md b/docs/kusion/3-concepts/8-configuration.md
new file mode 100644
index 00000000..2ad72e6b
--- /dev/null
+++ b/docs/kusion/3-concepts/8-configuration.md
@@ -0,0 +1,114 @@
+---
+id: configuration
+sidebar_label: Configuration
+---
+
+# Configuration
+
+Kusion can be configured with some global settings, which are separate from the AppConfiguration written by the application developers and the workspace configurations written by the platform engineers.
+
+The configurations are only relevant to Kusion itself, and can be managed by the command `kusion config`. The configuration items are predefined and use a hierarchical, dot-separated format, such as `backends.current`. For now, only the backend configurations are included.
+
+The configuration is stored in the file `${KUSION_HOME}/config.yaml`. For sensitive data, such as passwords, access key ids and secrets, setting them in the configuration file is not recommended; using the corresponding environment variables is safer.
+
+## Configuration Management
+
+Kusion provides the command `kusion config` and its sub-commands `get`, `list`, `set` and `unset` to manage the configuration. The usages are shown below.
+
+### Get a Specified Configuration Item
+
+Use `kusion config get` to get the value of a specified configuration item; only registered items can be obtained. The example is as below.
+
+```shell
+# get a configuration item
+kusion config get backends.current
+```
+
+### List the Configuration Items
+
+Use `kusion config list` to list all the Kusion configurations, where the result is in the YAML format. The example is as below.
+
+```shell
+# list all the Kusion configurations
+kusion config list
+```
+
+### Set a Specified Configuration Item
+
+Use `kusion config set` to set the value of a specified configuration item, where the type of the value is also fixed. Kusion supports `string`, `int`, `bool`, `array` and `map` as the value types, which should be passed through the CLI in the following formats:
+
+- `string`: the original format, such as `local-dev`, `oss-pre`;
+- `int`: converted to string, such as `3306`, `80`;
+- `bool`: converted to string, only `true` and `false` are supported;
+- `array`: converted to string with JSON marshal, such as `'["s3","oss"]'`. To preserve the format, enclosing the string content in single quotes is a good idea, or there may be unexpected errors;
+- `map`: converted to string with JSON marshal, such as `'{"path":"/etc"}'`.
+
+Besides the type, some configuration items have additional setting requirements. A configuration item may depend on another, that is, it must be set after that item. There may also be further restrictions on the configuration values themselves, for example, the valid keys for a map type value, or the data range for an int type value. For detailed configuration item information, please refer to the following content of this article.
+
+The example of setting a configuration item is as below. 
+
+```shell
+# set a configuration item of type string
+kusion config set backends.pre.type s3
+
+# set a configuration item of type map
+kusion config set backends.prod '{"configs":{"bucket":"kusion"},"type":"s3"}'
+```
+
+### Unset a Specified Configuration Item
+
+Use `kusion config unset` to unset a specified configuration item. Note that some items have dependencies and must be unset in the correct order. The example is as below.
+
+```shell
+# unset a specified configuration item
+kusion config unset backends.pre
+```
+
+## Backend Configurations
+
+The backend configurations define where Workspace, Spec and State files are stored. Multiple backends can be configured, along with the current backend in use.
+
+### Available Configuration Items
+
+- **backends.current**: type `string`, the name of the backend currently in use. It can be set to any configured backend name. If not set, the default local backend will be used.
+- **backends.${name}**: type `map`, a whole backend configuration, containing the type and config items, whose format is as below. It can be unset when the backend is not the current one.
+```yaml
+{
+  "type": "${backend_type}", # type string, required, supports local, oss, s3.
+  "configs": ${backend_configs} # type map, optional for type local, required for the others, the specific keys depend on the type, refer to the description of backends.${name}.configs.
+}
+```
+- **backends.${name}.type**: type `string`, the backend type, supporting `local`, `s3` and `oss`. It can be unset when the backend is not the current one and the corresponding `backends.${name}.configs` is empty.
+- **backends.${name}.configs**: type `map`, the backend config items, whose format depends on the backend type and is as below. It must be set after `backends.${name}.type`.
+```yaml
+# type local
+{
+  "path": "${local_path}" # type string, optional, the directory to store the files. If not set, use the default path ${KUSION_HOME}.
+}
+
+# type oss
+{
+  "endpoint": "${oss_endpoint}", # type string, required, the oss endpoint.
+  "accessKeyID": "${oss_access_key_id}", # type string, optional, the oss access key id, which can also be obtained from the environment variable OSS_ACCESS_KEY_ID.
+  "accessKeySecret": "${oss_access_key_secret}", # type string, optional, the oss access key secret, which can also be obtained from the environment variable OSS_ACCESS_KEY_SECRET.
+  "bucket": "${oss_bucket}", # type string, required, the oss bucket.
+  "prefix": "${oss_prefix}" # type string, optional, the prefix to store the files.
+}
+
+# type s3
+{
+  "region": "${s3_region}", # type string, optional, the aws region, which can also be obtained from the environment variables AWS_REGION and AWS_DEFAULT_REGION.
+  "endpoint": "${s3_endpoint}", # type string, optional, the aws endpoint.
+  "accessKeyID": "${s3_access_key_id}", # type string, optional, the aws access key id, which can also be obtained from the environment variable AWS_ACCESS_KEY_ID.
+  "accessKeySecret": "${s3_access_key_secret}", # type string, optional, the aws access key secret, which can also be obtained from the environment variable AWS_SECRET_ACCESS_KEY.
+  "bucket": "${s3_bucket}", # type string, required, the s3 bucket.
+  "prefix": "${s3_prefix}" # type string, optional, the prefix to store the files.
+}
+```
+- **backends.${name}.configs.path**: type `string`, the path of a local type backend. It must be set after `backends.${name}.type`, which must be `local`.
+- **backends.${name}.configs.endpoint**: type `string`, the endpoint of an oss or s3 type backend. 
It must be set after `backends.${name}.type`, which must be `oss` or `s3`.
+- **backends.${name}.configs.accessKeyID**: type `string`, the access key id of an oss or s3 type backend. It must be set after `backends.${name}.type`, which must be `oss` or `s3`. For `oss`, it can also be obtained from the environment variable `OSS_ACCESS_KEY_ID`; for `s3`, from `AWS_ACCESS_KEY_ID`.
+- **backends.${name}.configs.accessKeySecret**: type `string`, the access key secret of an oss or s3 type backend. It must be set after `backends.${name}.type`, which must be `oss` or `s3`. For `oss`, it can also be obtained from the environment variable `OSS_ACCESS_KEY_SECRET`; for `s3`, from `AWS_SECRET_ACCESS_KEY`.
+- **backends.${name}.configs.bucket**: type `string`, the bucket of an oss or s3 type backend. It must be set after `backends.${name}.type`, which must be `oss` or `s3`.
+- **backends.${name}.configs.prefix**: type `string`, the prefix to store the files of an oss or s3 type backend. It must be set after `backends.${name}.type`, which must be `oss` or `s3`.
+- **backends.${name}.configs.region**: type `string`, the aws region of an s3 type backend. It must be set after `backends.${name}.type`, which must be `s3`. It can also be obtained from the environment variables `AWS_REGION` and `AWS_DEFAULT_REGION`, with the former taking priority.
diff --git a/docs/kusion/3-concepts/9-release.md b/docs/kusion/3-concepts/9-release.md
new file mode 100644
index 00000000..05f27f71
--- /dev/null
+++ b/docs/kusion/3-concepts/9-release.md
@@ -0,0 +1,24 @@
+---
+id: release
+sidebar_label: Releases
+---
+
+# Release
+
+A Release represents a single operation, triggered by `kusion apply` or `kusion destroy`, providing users with a more coherent and consistent operation experience with Kusion. Release also provides audit and rollback capabilities, which are currently under development.
+
+Every time `kusion apply` or `kusion destroy` is executed, it triggers the generation of a `release` file. The combination of a `project` and `workspace` corresponds to a set of `release` files, which also relates to a set of real application resources. The `release` file is stored in the same `backend` as the `workspace`, and the default path is `$HOME/.kusion/releases/$PROJECT_NAME/$WORKSPACE_NAME`, with revisions starting from 1 and incrementing.
+
+The release file contains the [Spec](./6-spec.md) and [State](./9-release.md#state) of an application, both of which are composed of `Resources`, representing the expected description from the configuration code and the actual state of the resources respectively. In addition, the release file also contains information such as the creation and modification time, the operation phase, and application metadata.
+
+## State
+
+State is a record of an operation's result. It is a mapping between `resources` managed by `Kusion` and the actual infra resources. State is often used as a data source for three-way merge/diff in operations like `Apply` and `Preview`.
+
+A `resource` here represents an individual unit of infrastructure or application component, serving as a fundamental building block for defining and managing the actual state of your `project`. These `resources` are defined within the `State` and accurately reflect the actual states of the infrastructure. 
By providing a unified and consistent approach, `Kusion` enables seamless management of diverse resource types, encompassing Kubernetes objects and Terraform resources.Importantly, the structure of these resources in the `State` mirrors that of the `resources` in the `Spec`, ensuring coherence and facilitating efficient state management throughout the lifecycle of your `project`. + +State can be stored in many storage [backend](./7-backend.md) mediums like filesystems, S3, and OSS, etc. + +## Concurrency Control + +Release supports collaboration among multiple users and implements the concurrency control through operation `phase`. When the field of `phase` in the release file is not `succeeded` or `failed`, kusion will not be able to execute `kusion apply` or `kusion destroy` operation to the corresponding stack. For example, if a user unexpectedly exits during the `kusion apply` or `kusion destroy` process, the `phase` of the release file may be kept as `applying` or `destroying`. In this case, the user can use the command of `kusion release unlock` to unlock the release file for a specified application and workspace, setting the `phase` to `failed`. diff --git a/docs/kusion/3-concepts/_category_.json b/docs/kusion/3-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/docs/kusion/3-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/docs/kusion/4-configuration-walkthrough/1-overview.md b/docs/kusion/4-configuration-walkthrough/1-overview.md new file mode 100644 index 00000000..e7339ec9 --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/1-overview.md @@ -0,0 +1,223 @@ +--- +id: overview +--- + +# Configuration File Overview + +Kusion consumes one or more declarative configuration files (written in KCL) that describe the application, and delivers intent to the target runtime including Kubernetes, clouds, or on-prem infrastructure. + +This documentation series walks you through the odds and ends of managing such configuration files. + +## Table of Content + +- [Configuration File Overview](#configuration-file-overview) + - [Table of Content](#table-of-content) + - [Directory Structure](#directory-structure) + - [AppConfiguration Model](#appconfiguration-model) + - [Authoring Configuration Files](#authoring-configuration-files) + - [Identifying KCL file](#identifying-kcl-file) + - [KCL Schemas and KAM](#kcl-schemas-and-kam) + - [Kusion Modules](#kusion-modules) + - [Import Statements](#import-statements) + - [Understanding kcl.mod](#understanding-kclmod) + - [Building Blocks](#building-blocks) + - [Instantiating an application](#instantiating-an-application) + - [Using `kusion init`](#using-kusion-init) + - [Using references](#using-references) + +## Directory Structure + +Kusion expects the configuration file to be placed in a certain directory structure because it might need some metadata (that is not stored in the application configuration itself) in order to proceed. + +:::info + +See [Project](../concepts/project/overview) and [Stack](../concepts/stack/overview) for more details about Project and Stack. 
+:::
+
+A sample multi-stack directory structure looks like the following:
+```
+~/playground$ tree multi-stack-project/
+multi-stack-project/
+├── README.md
+├── base
+│   └── base.k
+├── dev
+│   ├── kcl.mod
+│   ├── main.k
+│   └── stack.yaml
+├── prod
+│   ├── kcl.mod
+│   ├── main.k
+│   └── stack.yaml
+└── project.yaml
+```
+
+In general, the directory structure follows a hierarchy where the top level holds the project configurations, and the sub-directories represent stack-level configurations.
+
+You may notice there is a `base` directory alongside all the stacks. The `base` directory is not mandatory, but rather a place to store common configurations between different stacks. A common pattern we observed is to use stacks to represent different stages (dev, stage, prod, etc.) in the software development lifecycle, and/or different deployment targets (azure-eastus, aws-us-east-1, etc.). A project can have as many stacks as needed.
+
+In practice, the applications deployed into dev and prod might very likely end up with a similar set of configurations except for a few fields such as the application image (dev might be on newer versions), resource requirements (prod might require more resources), etc.
+
+As a general best practice, we recommend managing the common configurations in `base.k` as much as possible to minimize duplicate code. We will cover how override works in [Base and Override](base-override).
+
+## AppConfiguration Model
+
+`AppConfiguration` is the out-of-the-box model we build that describes an application. It serves as the declarative intent for a given application.
+
+The schema for `AppConfiguration` is defined in the [KusionStack/kam](https://github.com/KusionStack/kam/blob/main/v1/app_configuration.k) repository. It is designed as a unified, application-centric model that encapsulates the comprehensive configuration details and, at the same time, hides the complexity of the infrastructure as much as possible.
+
+`AppConfiguration` consists of multiple sub-components that each represent either the application workload itself, its dependencies (in the form of [Kusion Modules](../concepts/module/overview)), relevant workflows or operational expectations. We will deep dive into the details on how to author each of these elements in this upcoming documentation series.
+
+For more details on the `AppConfiguration`, please refer to the [design documentation](../concepts/app-configuration).
+
+## Authoring Configuration Files
+
+[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open-source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to providing better modularity, scalability, stability and extensibility.
+
+### Identifying KCL file
+
+KCL files are identified by the `.k` suffix in the filename.
+
+### KCL Schemas and KAM
+
+Similar to most modern General Programming Languages (GPLs), KCL provides packages that are used to organize collections of related KCL source files into modular and re-usable units.
+
+In the context of Kusion, we abstracted a core set of KCL Schemas (such as the aforementioned `AppConfiguration`, `Workload`, `Container`, etc.) that represent the concepts that we believe are relatively universal and developer-friendly, also known as [Kusion Application Model](https://github.com/KusionStack/kam), or KAM.
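+
+As a quick illustration (a minimal sketch, not a complete application), a configuration built on the KAM schemas is simply a set of schema instances. The exact attributes available depend on the version of the `kam` package you depend on; the snippet below only assumes the `AppConfiguration` and `Service` schemas used throughout this series:
+
+```
+import kam.v1.app_configuration as ac
+import service
+
+# The smallest possible application: an AppConfiguration with an empty Service workload.
+helloworld: ac.AppConfiguration {
+    workload: service.Service {}
+}
+```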
+ +### Kusion Modules + +To extend the capabilities beyond the core KAM model, we use a concept known as [Kusion Modules](../concepts/module/overview) to define components that could best abstract the capabilities during an application delivery. We provide a collection of official out-of-the-box Kusion Modules that represents the most common capabilities. They are maintained in [KusionStack's GitHub container registry](https://github.com/orgs/KusionStack/packages). When authoring an application configuration file, you can simply declare said Kusion Modules as dependencies and import them to declare ship-time capabilities that the application requires. + +If the modules in the KusionStack container registry does not meet the needs of your applications, Kusion provides the necessary mechanisms to extend with custom-built Kusion Modules. You can always create and publish your own module, then import the new module in your application configuration written in KCL. + +For the steps to develop your own module, please refer to the Module developer guide. + +### Import Statements + +An example of the import looks like the following: +``` +### import from the official kam package +import kam.v1.app_configuration as ac + +### import kusion modules +import service +import service.container as c +import monitoring as m +import network as n +``` + +Take `import kam.v1.app_configuration as ac` as an example, the `.v1.app_configuration` part after `import kam` represents the relative path of a specific schema to import. In this case, the `AppConfiguration` schema is defined under `v1/app_configuration` directory in the `kam` package. + +### Understanding kcl.mod + +Much similar to the concept of `go.mod`, Kusion uses `kcl.mod` as the source of truth to manage metadata (such as package name, dependencies, etc.) for the current package. Kusion will also auto-generate a `kcl.mod.lock` as the dependency lock file. + +The most common usage for `kcl.mod` is to manage the dependency of your application configuration file. + +:::info + +Please note this `kcl.mod` will be automatically generated if you are using `kusion init` to initialize a project with a template. You will only need to modify this file if you are modifying the project metadata outside the initialization process, such as upgrading the dependency version or adding a new dependency altogether, etc. +:::info + +There are 3 sections in a `kcl.mod` file: +- `package`, representing the metadata for the current package. +- `dependencies`, describing the packages the current package depends on. Supports referencing either a git repository or an OCI artifact. +- `profile`, defining the behavior for Kusion. In the example below, it describes the list of files Kusion should look for when parsing the application configuration. + +An example of `kcl.mod`: +``` +[package] +name = "multi-stack-project" +edition = "0.5.0" +version = "0.1.0" + +[dependencies] +monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.1.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +# Uncomment the line below to use your own modified module +# my-module = { oci = "oci://ghcr.io/my-repository/my-package", tag = "my-version" } + +[profile] +entries = ["../base/base.k", "main.k"] +``` + +### Building Blocks + +Configuration files consist of building blocks that are made of instances of schemas. An `AppConfiguration` instance consists of several child schemas, most of which are optional. The only mandatory one is the `workload` instance. 
We will take a closer look in the [workload walkthrough](workload). The order of the building blocks does NOT matter. + +The major building blocks as of version `0.12.0`: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container {} + ... + } + secrets: {} + ... + } + # optional dependencies, usually expressed in kusion modules + accessories: { + ... + } + ... +} +``` + +We will deep dive into each one of the building blocks in this documentation series. + +### Instantiating an application + +In Kusion's out-of-the-box experience, an application is identified with an instance of `AppConfiguration`. You may have more than one application in the same project or stack. + +Here's an example of a configuration that can be consumed by Kusion (assuming it is placed inside the proper directory structure that includes project and stack configurations, with a `kcl.mod` present): + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +gocity: ac.AppConfiguration { + workload: service.Service { + containers: { + "gocity": c.Container { + image = "howieyuen/gocity:latest" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +Don't worry about what `workload` or `n.Network` stand for at the moment. We will deep dive into each one of them in this upcoming documentation series. + +### Using `kusion init` + +Kusion offers a `kusion init` sub-command which initializes a new project using a pre-built template, which saves you from the hassle of manually building the aforementioned directory structure that Kusion expects. + +There is a built-in template `quickstart` in the Kusion binary that can be used offline. + +The pre-built templates are meant to help you get off the ground quickly with some simple out-of-the-box examples. You can refer to the [QuickStart documentation](../getting-started/deliver-quickstart) for some step-by-step tutorials. + +### Using references + +The reference documentation for the `kam` package and the official Kusion Modules is located in [Reference](../reference/modules/developer-schemas/app-configuration). + +If you are using them out of the box, the reference documentation provides a comprehensive view for each schema involved, including all the attribute names and description, their types, default value if any, and whether a particular attribute is required or not. There will also be an example attached to each schema reference. + +We will also deep dive into some common examples in the upcoming sections. \ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/2-kcl-basics.md b/docs/kusion/4-configuration-walkthrough/2-kcl-basics.md new file mode 100644 index 00000000..aaa80366 --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/2-kcl-basics.md @@ -0,0 +1,144 @@ +--- +id: kcl-basics +--- + +# KCL Basics + +## Table of Content +- [Variable assignments](#variable-assignments) +- [Common built-in types](#common-built-in-types) +- [Lists and maps](#lists-and-maps) +- [Conditional statements](#conditional-statements) +- [The : and = operator](#the--and--operator) +- [Advanced KCL capabilities](#advanced-kcl-capabilities) + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open source constraint-based record and functional language. 
KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +## Variable assignments + +There are two ways to initialize a variable in KCL. You can either use the `:` operator or the `=` operator. We will discuss the difference between them in [this section later](#the--and--operator). + +Here are the two ways to create a variable and initialize it: +``` +foo = "Foo" # Declare a variable named `foo` and its value is a string literal "Foo" +bar: "Bar" # Declare a variable named `bar` and its value is a string literal "Bar" +``` + +You will be able to override a variable assignment via the `=` operator. We will discuss this in depth in the [`:` and `=` operator section](#the--and--operator). + +## Common built-in types + +KCL supports `int`, `float`, `bool` and `string` as the built-in types. + +Other types are defined in the packages that are imported into the application configuration files. One such example would be the `AppConfiguration` object (or `Container`, `Probe`, `Port` object, etc) that are defined in the `kam` repository. + +## Lists and maps + +Lists are represented using the `[]` notation. +An example of lists: +``` +list0 = [1, 2, 3] +list1 = [4, 5, 6] +joined_list = list0 + list1 # [1, 2, 3, 4, 5, 6] +``` + +Maps are represented using the `{}` notation. +An example of maps: +``` +a = {"one" = 1, "two" = 2, "three" = 3} +b = {'one' = 1, 'two' = 2, 'three' = 3} +assert a == b # True +assert len(a) == 3 # True +``` + +## Conditional statements +You can also use basic control flow statements when writing the configuration file. + +An example that sets the value of `replicas` conditionally based on the value of `containers.myapp.resources.cpu`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 if containers.myapp.resources.cpu == "500m" else 2 + } +} +``` + +For more details on KCL's control flow statements, please refer to the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#control-flow-statements). + +## The `:` and `=` operator + +You might have noticed there is a mixed usage of the `:` and `=` in the samples above. + +:::info + +**TLDR: The recommendation is to use `:` in the common configurations, and `=` for override in the environment-specific configurations.** +::: + +In KCL: +- `:` represents a union-ed value assignment. In the pattern `identifier: E` or `identifier: T E`, the value of the expression `E` with optional type annotation `T` will be merged and union-ed into the element value. +- `=` represents a value override. In the pattern `identifier = E` or `identifier = T E`, The value of the expression `E` with optional type annotation `T` will override the `identifier` attribute value. + +Let's take a look at an example: +``` +# This is one configuration that will be merged. +config: Config { + data.d1 = 1 +} +# This is another configuration that will be merged. 
+config: Config { + data.d2 = 2 +} +``` + +The above is equivalent to the snippet below since the two expressions for `config` get merged/union-ed into one: +``` +config: Config { + data.d1 = 1 + data.d2 = 1 +} +``` + +whereas using the `=` operators will result in a different outcome: +``` +# This is first configuration. +config = Config { + data.d1 = 1 +} +# This is second configuration that will override the prior one. +config = Config { + data.d2 = 2 +} +``` + +The config above results in: +``` +config: Config { + data.d2 = 2 +} +``` + +Please note that the `:` attribute operator represents an idempotent merge operation, and an error will be thrown when the values that need to be merged conflict with each other. + +``` +data0 = {id: 1} | {id: 2} # Error:conflicting values between {'id': 2} and {'id': 1} +data1 = {id: 1} | {id = 2} # Ok, the value of `data` is {"id": 2} +``` + +More about `:` and `=` operator can be found in the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#config-operations). + +## Advanced KCL capabilities + +For more advanced KCL capabilities, please visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl). \ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/3-base-override.md b/docs/kusion/4-configuration-walkthrough/3-base-override.md new file mode 100644 index 00000000..f14af112 --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/3-base-override.md @@ -0,0 +1,94 @@ +--- +id: base-override +--- + +# Base and Override + +In practice, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets, be it different environments in the SDLC, or different clouds, regions or runtimes for cost/regulation/performance or disaster recovery related reasons. + +In that context, we advocate for a pattern where you can leverage some Kusion and KCL features to minimize the amount of duplicate configurations, by separating the common base application configuration and environment-specific ones. + +:::info + +The file names in the below examples don't matter as long as they are called out and appear in the correct order in the `entries` field (the field is a list) in `kcl.mod`. The files with common configurations should appear first in the list and stack-specific ones last. The latter one takes precedence. + +The configurations also don't have be placed into a single `.k` file. For complex projects, they can be broken down into smaller organized `.k` files for better readability. +::: + +Base configuration defined in `base/base.k`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network.network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +Environment-specific configuration defined in `dev/main.k`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +# main.k declares customized configurations for dev stack. 
+myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + # dev stack has different app configuration from the base + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } + } + replicas = 2 + } +} +``` + +Alternatively, you could locate a specific property (in this case below, the `Container` object) in the `AppConfiguration` object using the dot selector shorthand(such as `workload.containers.myapp` or `workload.replicas` below): +``` +import kam.v1.app_configuration as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload.replicas = 2 + workload.containers.myapp: { + # dev stack has different app configuration + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } +} +``` +This is especially useful when the application configuration is complex but the override is relatively straightforward. + +The two examples above are equivalent when overriding the base. \ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/4-workload.md b/docs/kusion/4-configuration-walkthrough/4-workload.md new file mode 100644 index 00000000..2b880df0 --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/4-workload.md @@ -0,0 +1,373 @@ +# Workload + +The `workload` attribute in the `AppConfiguration` instance is used to describe the specification for the application workload. The application workload generally represents the computing component for the application. + +A `workload` maps to an `AppConfiguration` instance 1:1. If there are more than one workload, they should be considered different applications. + +## Table of Content +- [Import](#import) +- [Types of workloads](#types-of-workloads) +- [Configure containers](#configure-containers) + - [Application image](#application-image) + - [Resource Requirements](#resource-requirements) + - [Health Probes](#health-probes) + - [Lifecycle Hooks](#lifecycle-hooks) + - [Create Files](#create-files) + - [Customize container initialization](#customize-container-initialization) +- [Configure Replicas](#configure-replicas) +- [Differences between Service and Job](#differences-between-service-and-job) +- [Workload References](#workload-references) + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.container.probe as p +import service.container.lifecycle as lc +``` + +## Types of Workloads + +There are currently two types of workloads: + +- `Service`, representing a long-running, scalable workload type that should "never" go down and respond to short-lived latency-sensitive requests. This workload type is commonly used for web applications and services that expose APIs. +- `Job`, representing batch tasks that take from a few seconds to days to complete and then stop. These are commonly used for batch processing that is less sensitive to short-term performance fluctuations. 
+ +To instantiate a `Service`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service {} +} +``` + +To instantiate a `Job`: +``` +import kam.v1.app_configuration as ac +import job +import job.container as c + +myapp: ac.AppConfiguration { + workload: job.Job {} +} +``` + +Of course, the `AppConfiguration` instances above is not sufficient to describe an application. We still need to provide more details in the `workload` section. + +## Configure containers + +Kusion is built on top of cloud-native philosophies. One of which is that applications should run as loosely coupled microservices on abstract and self-contained software units, such as containers. + +The `containers` attribute in a workload instance is used to define the behavior for the containers that run application workload. The `containers` attribute is a map, from the name of the container to the `catalog.models.schema.v1.workload.container.Container` Object which includes the container configurations. + +:::info + +The name of the container is in the context of the configuration file, so you could refer to it later. It's not referring to the name of the container in the Kubernetes cluster (or any other runtime). +::: + +Everything defined in the `containers` attribute is considered an application container, as opposed to a sidecar container. Sidecar containers will be introduced in a different attribute in a future version. + +In most of the cases, only one application container is needed. Ideally, we recommend mapping an `AppConfiguration` instance to a microservice in the microservice terminology. + +We will walk through the details of configuring a container using an example of the `Service` type. + +To add an application container: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container {} + } + } +} +``` + +### Application image + +The `image` attribute in the `Container` schema specifies the application image to run. This is the only required field in the `Container` schema. + +To specify an application image: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + } + } +} +``` + +### Resource Requirements + +The `resources` attribute in the `Container` schema specifies the application resource requirements such as cpu and memory. + +You can specify an upper limit (which maps to resource limits only) or a range as the resource requirements (which maps to resource requests and limits in Kubernetes). + +To specify an upper bound (only resource limits): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # ... 
+ } + } + } +} +``` + +To specify a range (both resource requests and limits): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # Sets requests to cpu=250m and memory=256Mi + # Sets limits to cpu=500m and memory=512Mi + resources: { + "cpu": "250m-500m" + "memory": "256Mi-512Mi" + } + # ... + } + } + } +} +``` + +### Health Probes + +There are three types of `Probe` defined in a `Container`: + +- `livenessProbe` - used to determine if the container is healthy and running +- `readinessProbe` - used to determine if the container is ready to accept traffic +- `startupProbe` - used to determine if the container has started properly. Liveness and readiness probes don't start until `startupProbe` succeeds. Commonly used for containers that takes a while to start + +The probes are optional. You can only have one Probe of each kind for a given `Container`. + +To configure a `Http` type `readinessProbe` that probes the health via HTTP request and a `Exec` type `livenessProbe` which executes a command: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # Configure an Http type readiness probe at /healthz + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "/healthz" + } + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 15 + successThreshold: 3 + failureThreshold: 1 + } + # Configure an Exec type liveness probe that executes probe.sh + livenessProbe: p.Probe { + probeHandler: p.Exec { + command: ["probe.sh"] + } + initialDelaySeconds: 10 + } + } + } + } +} +``` + +### Lifecycle Hooks + +You can also configure lifecycle hooks that triggers in response to container lifecycle events such as liveness/startup probe failure, preemption, resource contention, etc. + +There are two types that is currently supported: + +- `PreStop` - triggers before the container is terminated. +- `PostStart` - triggers after the container is initialized. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # Configure lifecycle hooks + lifecycle: lc.Lifecycle { + # Configures an Exec type pre-stop hook that executes preStop.sh + preStop: p.Exec { + command: ["preStop.sh"] + } + # Configures an Http type pre-stop hook at /post-start + postStart: p.Http { + url: "/post-start" + } + } + } + } + } +} +``` + +### Create Files + +You can also create files on-demand during the container initialization. + +To create a custom file and mount it to `/home/admin/my-file` when the container starts: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... 
+ # Creates a file during container startup + files: { + "/home/admin/my-file": c.FileSpec { + content: "some file contents" + mode: "0777" + } + } + } + } +} +``` + +### Customize container initialization + +You can also customize the container entrypoint via `command`, `args`, and `workingDir`. These should **most likely not be required**. In most of the cases, the entrypoint details should be baked into the application image itself. + +To customize the container entrypoint: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # This command will overwrite the entrypoint set in the image Dockerfile + command: ["/usr/local/bin/my-init-script.sh"] + # Extra arguments append to command defined above + args: [ + "--log-dir=/home/my-app/logs" + "--timeout=60s" + ] + # Run the command as defined above, in the directory "/tmp" + workingDir: "/tmp" + } + } + } +} +``` + +## Configure Replicas + +The `replicas` field in the `workload` instance describes the number of identical copies to run at the same time. It is generally recommended to have multiple replicas in production environments to eliminate any single point of failure. In Kubernetes, this corresponds to the `spec.replicas` field in the relevant workload manifests. + +To configure a workload to have a replica count of 3: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + # ... + } + replicas: 3 + # ... + } + # ... +} +``` + +## Differences between Service and Job + +The two types of workloads, namely `Service` and `Job`, share a majority of the attributes with some minor differences. + +### Exposure + +A `Service` usually represents a long-running, scalable workload that responds to short-lived latency-sensitive requests and never go down. Hence, a `Service` has an additional attribute that determines how it is exposed and can be accessed. A `Job` does NOT have the option to be exposed. We will explore more in the [application networking walkthrough](networking). + +### Job Schedule + +A `Job` can be configured to run in a recurring manner. In this case, the job will have a cron-format schedule that represents its recurring schedule. + +To configure a job to run at 21:00 every night: +``` +import kam.v1.app_configuration as ac +import job +import job.container as c + +myjob: ac.AppConfiguration { + workload: job.Job { + containers: { + "busybox": c.Container { + image: "busybox:1.28" + # Run the following command as defined + command: ["/bin/sh", "-c", "echo hello"] + } + } + # Run every hour. + schedule: "0 * * * *" + } +} +``` + +## Workload References + +You can find workload references [here](../reference/modules/developer-schemas/workload/service). + +You can find workload schema source [here](https://github.com/KusionStack/catalog/tree/main/models/schema/v1/workload). 
\ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/5-networking.md b/docs/kusion/4-configuration-walkthrough/5-networking.md new file mode 100644 index 00000000..adaa9904 --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/5-networking.md @@ -0,0 +1,174 @@ +--- +id: networking +--- + +# Application Networking + +In addition to configuring application's [container specifications](workload#configure-containers), you can also configure its networking behaviors, including how to expose the application and how it can be accessed. You can specify a `network` module in the `accessories` field in `AppConfiguration` to achieve that. + +In future versions, this will also include ingress-based routing strategy and DNS configurations. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `network` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +``` + +The `kcl.mod` must contain reference to the network module: +``` +#... + +[dependencies] +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +#... +``` + +## Private vs Public Access + +Private network access means the service can only be access from within the target cluster. + +Public access is implemented using public load balancers on the cloud. This generally requires a Kubernetes cluster that is running on the cloud with a vendor-specific service controller. + +Any ports defined default to private access unless explicitly specified. + +To expose port 80 to be accessed privately: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +To expose port 80 to be accessed publicly: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +:::info +The CSP (Cloud Service Provider) used to provide load balancers is defined by platform engineers in workspace. +::: + +## Mapping ports + +To expose a port `80` that maps to a different port `8088` on the container: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + } + ] + } + } +} +``` + +## Exposing multiple ports + +You can also expose multiple ports and configure them separately. + +To expose port 80 to be accessed publicly, and port 9099 for private access (to be scraped by Prometheus, for example): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... 
+ } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + n.Port { + port: 9099 + } + ] + } + } +} +``` + +## Choosing protocol + +To expose a port using the `UDP` protocol: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + protocol: "UDP" + } + ] + } + } +} +``` \ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/6-database.md b/docs/kusion/4-configuration-walkthrough/6-database.md new file mode 100644 index 00000000..6a8dedab --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/6-database.md @@ -0,0 +1,467 @@ +--- +id: databse +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Managed Databases + +You could also specify a database needed for the application. That can be achieved via a `mysql` or a `postgres` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. + +You can currently have several databases with **different database names** for an application at the same time. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `mysql` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](./1-overview.md#configuration-file-overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import mysql +import postgres +``` + +The `kcl.mod` must contain reference to the `mysql` module or `postgres` module: +``` +#... + +[dependencies] +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +postgres = { oci = "oci://ghcr.io/kusionstack/postgres", tag = "0.2.0" } +#... +``` + +## Types of Database offerings + +As of version 0.11.0, Kusion supports the following database offerings on the cloud: +- MySQL and PostgreSQL Relational Database Service (RDS) on [AWS](https://aws.amazon.com/rds/) +- MySQL and PostgreSQL Relational Database Service (RDS) on [AliCloud](https://www.alibabacloud.com/product/databases) + +More database types on more cloud vendors will be added in the future. + +Alternatively, Kusion also supports creating a database at `localhost` for local testing needs. A local database is quicker to stand up and easier to manage. It also eliminates the need for an account and any relevant costs with the cloud providers in the case that a local testing environment is sufficient. + +:::info +You do need a local Kubernetes cluster to run the local database workloads. You can refer to [Minikube](https://minikube.sigs.k8s.io/docs/start/) or [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) to get started. +To see an end-to-end use case for standing up a local testing environment including a local database, please refer to the [Kusion Quickstart](../2-getting-started/2-deliver-quickstart.md). +::: + +## Cloud Credentials and Permissions + +Kusion provisions databases on the cloud via [terraform](https://www.terraform.io/) providers. For it to create _any_ cloud resources, it requires a set of credentials that belongs to an account that has the appropriate write access so the terraform provider can be initialized properly. 
+ +For AWS, the environment variables needed: +``` +export AWS_REGION=us-east-1 # replace it with your region +export AWS_ACCESS_KEY_ID="xxxxxxxxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="xxxxxxx" # replace it with your SecretKey +``` + +For AliCloud, the environment variables needed: +``` +export ALICLOUD_REGION=cn-shanghai # replace it with your region +export ALICLOUD_ACCESS_KEY="xxxxxxxxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="xxxxxxxxx" # replace it with your SecretKey +``` + +The user account that owns these credentials would need to have the proper permission policies attached to create databases and security groups. If you are using the cloud-managed policies, the policies needed to provision a database and configure firewall rules are listed below. + +For AWS: +- `AmazonVPCFullAccess` for creating and managing database firewall rules via security group +- `AmazonRDSFullAccess` for creating and managing RDS instances + +For AliCloud: +- `AliyunVPCFullAccess` for creating and managing database firewall rules via security group +- `AliyunRDSFullAccess` for creating and managing RDS instances + +Alternatively, you can use customer managed policies if the cloud provider built-in policies don't meet your needs. The list of permissions needed are in the [AmazonRDSFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonRDSFullAccess.html#AmazonRDSFullAccess-json) and [AmazonVPCFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonVPCFullAccess.html). It will most likely be a subset of the permissions in the policy documents. + +## Configure Database + +### Provision a Cloud Database + +Assuming the steps in the [Cloud Credentials and Permissions](#cloud-credentials-and-permissions) section is setup properly, you can now provision cloud databases via Kusion. + +#### AWS RDS Instance +To provision an AWS RDS instance with MySQL v8.0 or PostgreSQL v14.0, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# MySQL configurations for AWS RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# PostgreSQL configurations for AWS RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... 
+ accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +It's highly recommended to replace `0.0.0.0/0` and closely manage the whitelist of IPs that can access the database for security purposes. The `0.0.0.0/0` in the example above or if `securityIPs` is omitted altogether will allow connections from anywhere which would typically be a security bad practice. + +The `instanceType` field determines the computation and memory capacity of the RDS instance. The `db.t3.micro` instance type in the example above represents the `db.t3` instance class with a size of `micro`. In the same `db.t3` instance family there are also `db.t3.small`, `db.t3.medium`, `db.t3.2xlarge`, etc. + +The full list of supported `instanceType` values can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Support). + +You can also adjust the storage capacity for the database instance by changing the `size` field which is storage size measured in gigabytes. The minimum is 20. More details can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#Concepts.Storage.GeneralSSD). + +#### AliCloud RDS Instance + +To provision an Alicloud RDS instance with MySQL or PostgreSQL, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. Note that AliCloud RDS has several additional fields such as `category`, `subnetID` and `privateRouting`: + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# MySQL configurations for Alicloud RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# PostgreSQL configurations for Alicloud RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +We will walkthrough `subnetID` and `privateRouting` in the [Configure Network Access](#configure-network-access) section. 
+ +The full list of supported `instanceType` values can be found in: +- [MySQL instance types(x86)](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/primary-apsaradb-rds-for-mysql-instance-types#concept-2096487) +- [PostgreSQL instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-postgresql/primary-apsaradb-rds-for-postgresql-instance-types#concept-2096578) + +### Local Database + +To deploy a local database with MySQL v8.0 or PostgreSQL v14.0: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +## Database Credentials + +There is no need to manage the database credentials manually. Kusion will automatically generate a random password, set it as the credential when creating the database, and then inject the hostname, username and password into the application runtime. + +You have the option to BYO (Bring Your Own) username for the database credential by specifying the `username` attribute in the `workspace.yaml`: +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + # ... + username: "my_username" +``` + +You **cannot** bring your own password. The password will always be managed by Kusion automatically. + +The database credentials are injected into the environment variables of the application container. You can access them via the following env vars: +``` +# env | grep KUSION_DB +KUSION_DB_HOST_WORDPRESS_MYSQL=wordpress.xxxxxxxx.us-east-1.rds.amazonaws.com +KUSION_DB_USERNAME_WORDPRESS_MYSQL=xxxxxxxxx +KUSION_DB_PASSWORD_WORDPRESS_MYSQL=xxxxxxxxx +``` + +:::info +More details about the environment of database credentials injected by Kusion can be found at [mysql credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) and [postgres credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/postgres.md#credentials-and-connectivity) +::: + +You can use these environment variables out of the box. Or most likely, your application might retrieve the connection details from a different set of environment variables. In that case, you can map the kusion environment variables to the ones expected by your application using the `$()` expression. + +This example below will assign the value of `KUSION_DB_HOST_WORDPRESS_MYSQL` into `WORDPRESS_DB_HOST`, `KUSION_DB_USERNAME_WORDPRESS_MYSQL` into `WORDPRESS_DB_USER`, likewise for `KUSION_DB_PASSWORD_WORDPRESS_MYSQL` and `WORDPRESS_DB_PASSWORD`: +``` +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3-apache" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + } + # ... + } + } + # ... + } + accessories: { + # ... + } +} +``` + +## Configure Network Access + +You can also optionally configure the network access to the database as part of the `AppConfiguration`. This is highly recommended because it dramatically increases the security posture of your cloud environment in the means of least privilege principle. 
+ +The `securityIPs` field in the `Database` schema declares the list of network addresses that are allowed to access the database. The network addresses are in the [CIDR notation](https://aws.amazon.com/what-is/cidr/) and can be either a private IP range ([RFC-1918](https://datatracker.ietf.org/doc/html/rfc1918) and [RFC-6598](https://datatracker.ietf.org/doc/html/rfc6598) address) or a public one. + +If the database need to be accessed from a public location (which should most likely not be the case in a production environment), `securityIPs` need to include the public IP address of the traffic source (For instance, if the RDS database needs to be accessed from your computer). + +To configure AWS RDS to restrict network access from a VPC with a CIDR of `10.0.1.0/24` and a public IP of `103.192.227.125`: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + # ... + securityIPs: + - "10.0.1.0/24" + - "103.192.227.125/32" +``` + +Depending on the cloud provider, the default behavior of the database firewall settings may differ if omitted. + +### Subnet ID + +On AWS, you have the option to launch the RDS instance inside a specific VPC if a `subnetID` is present in the application configuration. By default, if `subnetID` is not provided, the RDS will be created in the default VPC for that account. However, the recommendation is to self-manage your VPCs to provider better isolation from a network security perspective. + +On AliCloud, the `subnetID` is required. The concept of subnet maps to VSwitch in AliCloud. + +To place the RDS instance into a specific VPC on Alicloud: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + # ... + subnetID: "subnet-xxxxxxxxxxxxxxxx" +``` + +### Private Routing + +There is an option to enforce private routing on certain cloud providers if both the workload and the database are running on the cloud. + +On AliCloud, you can set the `privateRouting` flag to `True`. The database host generated will be a private FQDN that is only resolvable and accessible from within the AliCloud VPCs. Setting `privateRouting` flag to `True` when `type` is `aws` is a no-op. + +To enforce private routing on AliCloud: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + # ... + privateRouting: true +``` + +Kusion will then generate a private FQDN and inject it into the application runtime as the environment variable `KUSION_DB_HOST_` for the application to use. A complete list of Kusion-managed environment variables for mysql database can be found [here](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity). + +Otherwise when using the public FQDN to connect to a database from the workload, the route will depend on cloud provider's routing preference. The options are generally either: +- Travel as far as possible on the cloud provider's global backbone network, or also referred to as cold potato routing, or +- Egress as early as possible to the public Internet and re-enter the cloud provider's datacenter later, or also referred to as hot potato routing + +The prior generally has better performance but is also more expensive. + +You can find a good read on the [AWS Blog](https://aws.amazon.com/blogs/architecture/internet-routing-and-traffic-engineering/) or the [Microsoft Learn](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/routing-preference-overview). 
\ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/7-secret.md b/docs/kusion/4-configuration-walkthrough/7-secret.md new file mode 100644 index 00000000..db1d576e --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/7-secret.md @@ -0,0 +1,251 @@ +--- +id: secret +--- + +# Secrets + +Secrets are used to store sensitive data like passwords, API keys, TLS certificates, tokens, or other credentials. Kusion provides multiple secret types, and makes it easy to be consumed in containers. + +For application dependent cloud resources that are managed by Kusion, their credentials are automatically managed by Kusion (generated and injected into application runtime environment variable). You shouldn't have to manually create those. + +## Using secrets in workload + +Secrets must be defined in AppConfiguration. The values can be generated by Kusion or reference existing secrets stored in third-party vault. Secrets can be consumed in containers by referencing them through the `secret:///` URI syntax. + +### Consume secret in an environment variable + +You can consume the data in Secrets as environment variable in your container. For example the db container uses an environment variable to set the root password. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampledb: ac.AppConfiguration { + workload: service.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume db-root-password secret in environment + "ROOT_PASSWORD": "secret://db-root-password/token" + } + } + } + # Secrets used to generate token + secrets: { + "init-info": sec.Secret { + type: "token" + } + } + } +} +``` + +The example shows the secret `root-password` being consumed as an environment variable in the db container. The secret is of type token and will automatically be generated at runtime by Kusion. + +### Consume all secret keys as environment variables + +Sometimes your secret contains multiple data that need to be consumed as environment variables. The example below shows how to consume all the values in a secret as environment variables named after the keys. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampledb: ac.AppConfiguration { + workload: service.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume all init-info secret keys as environment variables + "secret://init-info": "" + } + } + } + # Secrets used to init mysql instance + secrets: { + "init-info": sec.Secret { + type: "opaque" + data: { + "ROOT_PASSWORD": "admin" + } + } + } + } +} +``` + +This will set the environment variable "ROOT_PASSWORD" to the value "admin" in the db container. + +## Types of secrets + +Kusion provides multiple types of secrets to application developers. + +1. Basic: Used to generate and/or store usernames and passwords. +2. Token: Used to generate and/or store secret strings for password. +3. Opaque: A generic secret that can store arbitrary user-defined data. +4. Certificate: Used to store a certificate and its associated key that are typically used for TLS. +5. External: Used to retrieve secret form third-party vault. + +### Basic secrets + +Basic secrets are defined in the secrets block with the type "basic". 
+ +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "auth-info": sec.Secret { + type: "basic" + data: { + "username": "admin" + "password": "******" + } + } + } + } +} +``` + +The basic secret type is typically used for basic authentication. The key names must be username and password. If one or both of the fields are defined with a non-empty string, those values will be used. If the empty string, the default value, is used Acorn will generate random values for one or both. + +### Token secrets + +Token secrets are useful for generating a password or secure string used for passwords when the user is already known or not required. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "api-token": sec.Secret { + type: "token" + data: { + "token": "" + } + } + } + } +} +``` + +The token secret type must be defined. The `token` field in the data object is optional and if left empty Kusion will generate the token, which is 54 characters in length by default. If the `token` is defined that value will always be used. + +### Opaque secrets + +Opaque secrets have no defined structure and can have arbitrary key value pairs. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "my-secret": sec.Secret { + type: "opaque" + } + } + } +} +``` + +### Certificate secrets + +Certificate secrets are useful for storing a certificate and its associated key. One common use for TLS Secrets is to configure encryption in transit for an Ingress, but you can also use it with other resources or directly in your workload. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "server-cert": sec.Secret { + type: "certificate" + data: { + # Please do not put private keys in configuration files + "tls.crt": "The cert file content" + "tls.key": "The key file content" + } + } + } + } +} +``` + +### External secrets + +As a general principle, storing secrets in a plain text configuration file is highly discouraged, keeping secrets outside of Git is especially important for future-proofing, even encrypted secrets are not recommended to check into Git. The most common approach is to store secrets in a third-party vault (such as Hashicorp Vault, AWS Secrets Manager and Azure Key Vault, etc) and retrieve the secret in the runtime only. External secrets are used to retrieve sensitive data from external secret store to make it easy to be consumed in containers. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "api-access-token": sec.Secret { + type: "external" + data: { + # Please do not put private keys in configuration files + "accessToken": "ref://api-auth-info/accessToken?version=1" + } + } + } + } +} +``` + +The value field in data object follow `ref://PATH[?version=]` URI syntax. 
`PATH` is the provider-specific path for the secret to be retried. Kusion provides out-of-the-box integration with `Hashicorp Vault`, `AWS Secrets Manager`, `Azure Key Vault` and `Alicloud Secrets Manager`. + +## Immutable secrets + +You can also declare a secret as immutable to prevent it from being changed accidentally. + +To declare a secret as immutable: + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "my-secret": sec.Secret { + # ... + immutable: True + } + } + } +} +``` + +You can change a secret from mutable to immutable but not the other way around. That is because the Kubelet will stop watching secrets that are immutable. As the name suggests, you can only delete and re-create immutable secrets but you cannot change them. \ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/8-monitoring.md b/docs/kusion/4-configuration-walkthrough/8-monitoring.md new file mode 100644 index 00000000..13e430f3 --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/8-monitoring.md @@ -0,0 +1,102 @@ +# Application Monitoring + +You could also specify the collection of monitoring requirements for the application. That can be achieved via a `monitoring` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. + +As of version 0.11.0, Kusion supports integration with Prometheus by managing scraping behaviors in the configuration file. + +:::info + +For the monitoring configuration to work (more specifically, consumed by Prometheus), this requires the target cluster to have installed Prometheus correctly, either as a Kubernetes operator or a server/agent. + +More about how to set up Prometheus can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus) +::: + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `monitoring` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import monitoring as m +``` + +## Workspace configurations + +In addition to the KCL configuration file, there are also workspace-level configurations that should be set first. In an ideal scenario, this step is done by the platform engineers. + +In the event that they do not exist for you or your organization, e.g. if you are an individual developer, you can either do it yourself or use the [default values](#default-values) provided by the KusionStack team. The steps to do this yourself can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus#setting-up-workspace-configs). + +:::info + +For more details on how workspaces work, please refer to the [workspace concept](../3-concepts/4-workspace.md) +::: + +By separating configurations that the developers are interested in and those that platform owners are interested in, we can reduce the cognitive complexity of the application configuration and achieve separation of concern. + +You can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. 
+ +```yaml +modules: + kusionstack/monitoring@v0.1.0: + default: + interval: 30s + monitorType: Pod + operatorMode: true + scheme: http + timeout: 15s +``` + +## Managing Scraping Configuration +To manage scrape configuration for the application: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/metrics" + port: "web" + } + } +} +``` + +The example above will instruct the Prometheus job to scrape metrics from the `/metrics` endpoint of the application on the port named `web`. + +To instruct Prometheus to scrape from `/actuator/metrics` on port `9099` instead: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/actuator/metrics" + port: "9099" + } + } +} +``` + +Note that numbered ports only work when your Prometheus is not running as an operator. + +Neither `path` and `port` are required fields if Prometheus runs as an operator. If omitted, `path` defaults to `/metrics`, and `port` defaults to the container port or service port, depending on which resource is being monitored. If Prometheus does not run as an operator, both fields are required. + +Scraping scheme, interval and timeout are considered platform-managed configurations and are therefore managed as part of the [workspace configurations](../user-guides/observability/prometheus#setting-up-workspace-configs). + +More details about how the Prometheus integration works can be found in the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md). + +## Default values + +If no workspace configurations are found, the default values provided by the KusionStack team are: +- Scraping interval defaults to 30 seconds +- Scraping timeout defaults to 15 seconds +- Scraping scheme defaults to http +- Defaults to NOT running as an operator + +If any of the default values does not meet your need, you can change them by [setting up the workspace configuration](../user-guides/observability/prometheus#setting-up-workspace-configs). \ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/9-operational-rules.md b/docs/kusion/4-configuration-walkthrough/9-operational-rules.md new file mode 100644 index 00000000..674d2f2c --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/9-operational-rules.md @@ -0,0 +1,54 @@ +--- +id: operational-rules +--- + +# Operational Rules + +You could also specify the collection of operational rule requirements for the application. That can be achieved via a `opsrule` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. Operational rules are used as a preemptive measure to police and stop any unwanted changes. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `opsrule` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import opsrule as o +``` + +## Max Unavailable Replicas + +Currently, the `opsrule` module supports setting a `maxUnavailable` parameter, which specifies the maximum number of pods that can be rendered unavailable at any time. 
It can be either a fraction of the total pods for the current application or a fixed number. This operational rule is particularly helpful against unexpected changes or deletes to the workloads. It can also prevent too many workloads from going down during an application upgrade. + +More rules will be available in future versions of Kusion. + +To set `maxUnavailable` to a percentage of pods: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + # ... + } + } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + +To set `maxUnavailable` to a fixed number of pods: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: 2 + } + } +} +``` \ No newline at end of file diff --git a/docs/kusion/4-configuration-walkthrough/_category_.json b/docs/kusion/4-configuration-walkthrough/_category_.json new file mode 100644 index 00000000..64d45678 --- /dev/null +++ b/docs/kusion/4-configuration-walkthrough/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Configuration Walkthrough" +} diff --git a/docs/kusion/5-user-guides/1-cloud-resources/1-database.md b/docs/kusion/5-user-guides/1-cloud-resources/1-database.md new file mode 100644 index 00000000..497c3ab4 --- /dev/null +++ b/docs/kusion/5-user-guides/1-cloud-resources/1-database.md @@ -0,0 +1,305 @@ +--- +id: database +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deliver the WordPress Application with Cloud RDS + +This tutorial will demonstrate how to deploy a WordPress application with Kusion, which relies on both Kubernetes and IaaS resources provided by cloud vendors. We can learn how to declare the Relational Database Service (RDS) to provide a cloud-based database solution with Kusion for our application from this article. + +## Prerequisites + +- Install [Kusion](../../2-getting-started/1-install-kusion.md). +- Install [kubectl CLI](https://kubernetes.io/docs/tasks/tools/#kubectl) and run a [Kubernetes](https://kubernetes.io/) or [k3s](https://docs.k3s.io/quick-start) or [k3d](https://k3d.io/v5.4.4/#installation) or [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node) cluster. +- Prepare a cloud service account and create a user with at least **VPCFullAccess** and **RDSFullAccess** related permissions to use the Relational Database Service (RDS). This kind of user can be created and managed in the Identity and Access Management (IAM) console of the cloud vendor. +- The environment that executes `kusion` needs to have connectivity to terraform registry to download the terraform providers. 
+ +Additionally, we also need to configure the obtained AccessKey and SecretKey as well as the cloud resource region as environment variables for specific cloud provider: + + + + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +export AWS_REGION=us-east-1 # replace it with your region +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +```mdx-code-block + + +``` + +```bash +export ALICLOUD_ACCESS_KEY="LTAI5txxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="nxuowIxxx" # replace it with your SecretKey +export ALICLOUD_REGION=cn-hangzhou # replace it with your region +``` + +![alicloud iam account](/img/docs/user_docs/getting-started/set-rds-access.png) + +```mdx-code-block + + +``` + +## Init Workspace + +To deploy the WordPress application with cloud rds, we first need to initiate a `Workspace` for the targeted stack (here we are using `dev`). Please copy the following example YAML file to your local `workspace.yaml`. + + + + +`workspace.yaml` +```yaml +# MySQL configurations for AWS RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +`workspace.yaml` +```yaml +# MySQL configurations for Alicloud RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +If you would like to try creating the `Alicloud` RDS instance, you should replace the `[your-subnet-id]` of `modules.kusionstack/mysql@0.1.0.default.subnetID` field with the Alicloud `vSwitchID` to which the database will be provisioned in. After that, you can execute the following command line to initiate the configuration for `dev` workspace. + +```shell +kusion workspace create dev -f workspace.yaml +``` + +Since Kusion by default use the `default` workspace, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you have already created and used the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + +The `workspace.yaml` is a sample configuration file for workspace management, including `MySQL` module configs. Workspace configurations are usually declared by **Platform Engineers** and will take effect through the corresponding stack. + +:::info +More details about the configuration of Workspace can be found in [Concepts of Workspace](../../3-concepts/4-workspace.md). +::: + +## Create Project And Stack + +We can create a new project named `wordpress-rds-cloud` with the `kusion project create` command. + +```shell +# Create a new directory and navigate into it. +mkdir wordpress-rds-cloud && cd wordpress-rds-cloud + +# Create a new project with the name of the current directory. 
+kusion project create +``` + +After creating the new project, we can create a new stack named `dev` with the `kusion stack create` command. + +```shell +# Create a new stack with the specified name under current project directory. +kusion stack create dev +``` + +The created project and stack structure looks like below: + +```shell +tree +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +### Update And Review Configuration Codes + +The configuration codes in the created stack are basically empty, thus we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +```shell +# dev/kcl.mod +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +``` + +```python +# dev/main.k +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +import mysql + +# main.k declares customized configurations for dev stacks. +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + wordpress: c.Container { + image: "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +## Application Delivery + +You can complete the delivery of the WordPress application in the folder of `wordpress-cloud-rds/dev` using the following command line. Kusion will enable the watching of the application resource creation and automatic port-forwarding of the specified port (80) from local to the Kubernetes Service. + +```shell +cd dev && kusion apply --watch +``` + +:::info +During the first apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + + + + +![apply the wordpress application with aws rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![apply the wordpress application with alicloud rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + +``` + +After all the resources reconciled, we can port-forward our local port (e.g. 12345) to the WordPress frontend service port (80) in the cluster: + +```shell +kubectl port-forward -n wordpress-cloud-rds svc/wordpress-cloud-rds-dev-wordpress-private 12345:80 +``` + +![kubectl port-forward for wordpress](/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png) + +## Verify WordPress Application + +Next, we will verify the WordPress site service we just delivered, along with the creation of the RDS instance it depends on. We can start using the WordPress site by accessing the link of local-forwarded port [(http://localhost:12345)](http://localhost:12345) we just configured in the browser. 
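+
+Before switching to the browser, you can optionally confirm from the terminal that the forwarded port is already serving WordPress (this assumes the `kubectl port-forward` command from the previous step is still running on local port `12345`):
+
+```shell
+# Optional check: expect an HTTP status line from WordPress
+# (e.g. 200, or a redirect to the install page) once port-forwarding is active.
+curl -I http://localhost:12345
+```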
+ +![wordpress site page](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +In addition, we can also log in to the cloud service console page to view the RDS instance we just created. + + + + +![aws rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png) + +```mdx-code-block + + +``` + +![alicloud rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png) + +```mdx-code-block + + +``` + +## Delete WordPress Application + +You can delete the WordPress application and related RDS resources using the following command line. + +```shell +kusion destroy --yes +``` + + + + +![kusion destroy wordpress with aws rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![kusion destroy wordpress with alicloud rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + diff --git a/docs/kusion/5-user-guides/1-cloud-resources/2-expose-service.md b/docs/kusion/5-user-guides/1-cloud-resources/2-expose-service.md new file mode 100644 index 00000000..b3e78d73 --- /dev/null +++ b/docs/kusion/5-user-guides/1-cloud-resources/2-expose-service.md @@ -0,0 +1,259 @@ +--- +id: expose-service +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Expose Application Service Deployed on CSP Kubernetes + +Deploying applications on the Kubernetes provided by CSP (Cloud Service Provider) is convenient and reliable, which is adopted by many enterprises. Kusion has a good integration with CSP Kubernetes service. You can deploy your application to the Kubernetes cluster, and expose the service in a quite easy way. + +This tutorial demonstrates how to expose service of the application deployed on CSP Kubernetes. And the responsibilities of platform engineers and application developers are also clearly defined. + +## Prerequisites + +Create a Kubernetes cluster provided by CSP, and complete the corresponding configurations for `KUBECONFIG`. The following CSP Kubernetes services are supported: + +- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks) +- [Alibaba Cloud Container Service for Kubernetes (ACK)](https://www.alibabacloud.com/product/kubernetes) + +## Expose Service Publicly + +If you want the application to be accessed from outside the cluster, you should expose the service publicly. Follow the steps below, you will simply hit the goal. + +### Set up Workspace + +Create the workspace as the target where the application will be deployed to. The workspace is usually set up by **Platform Engineers**, which contains platform-standard and application-agnostic configurations. The workspace configurations are organized through a YAML file. + + + + +```yaml +modules: + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + port: + type: aws +``` + +```mdx-code-block + + +``` + +```yaml +modules: + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + port: + type: alicloud + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +```mdx-code-block + + +``` + +The YAML shown above gives an example of the workspace configuration to expose service on **EKS** and **ACK**. 
The block `port` contains the workspace configuration of Kusion module `network`, which has the following fields: + +- type: the CSP providing Kubernetes service, support `alicloud` and `aws` +- annotations: annotations attached to the service, should be a map +- labels: labels attached to the service, should be a map + +Then, create the workspace with the configuration file. The following command creates a workspace named `dev` with configuration file `workspace.yaml`. + +```bash +kusion workspace create dev -f workspace.yaml +``` + +After that, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you already create and use the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + + +### Init Project + +After creating workspace, you should write application configuration code, which only contains simple and application-centric configurations. This step is usually accomplished by application developers. + +We can start by initializing this tutorial project with `kusion init` cmd: + +```shell +# Create a new directory and navigate into it. +mkdir nginx && cd nginx + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the directory structure can be found in [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md). +::: + +### Update And Review Configuration Codes + +The initiated configuration codes are for the demo quickstart application, we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +`dev/kcl.mod` +```shell +[package] +name = "nginx" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +`dev/main.k` +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: service.Service { + containers: { + nginx: c.Container { + image: "nginx:1.25.2" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: True + } + ] + } + } +} +``` + +The code shown above describes how to expose service publicly. Kusion use schema `Port` to describe the network configuration, the primary fields of Port are as follows: + +- port: port number to expose service +- protocol: protocol to expose service, support `TCP` and `UDP` +- public: whether to public the service + +To public the service, you should set `public` as True. Besides, schema `Service` should be used to describe the workload configuration. + +That's all what an application developer needs to configure! 
Next, preview and apply the configuration, the application will get deployed and exposed publicly. + +:::info +Kusion uses Load Balancer (LB) provided by the CSP to expose service publicly. For more detailed network configuration, please refer to [Application Networking](https://www.kusionstack.io/docs/kusion/configuration-walkthrough/networking) +::: + +:::info +During the first preview and apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + +### Preview and Apply + +Execute `kusion preview` under the stack path, you will get what will be created in the real infrastructure. The picture below gives the preview result of the example. A Namespace, Service and Deployment will be created, which meets the expectation. The service name has a suffix `public`, which shows it can be accessed publicly. + +![preview-public](/img/docs/user_docs/cloud-resources/expose-service/preview-public.png) + +Then, execute `kusion apply --yes` to do the real deploying job. Just a command and a few minutes, you have accomplished deploying application and expose it publicly. + +![apply-public](/img/docs/user_docs/cloud-resources/expose-service/apply-public.png) + +### Verify Accessibility + +In the example above, the kubernetes Namespace whose name is nginx, and a `Service` and `Deployment` under the Namespace should be created. Use `kubectl get` to check, the Service whose type is `LoadBalancer` and Deployment are created indeed. And the Service has `EXTERNAL-IP` 106.5.190.109, which means it can be accessed from outside the cluster. + +![k8s-resource-public](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png) + +Visit the `EXTERNAL-IP` via browser, the correct result is returned, which illustrates the servie getting publicly exposed successfully. + +![result-public](/img/docs/user_docs/cloud-resources/expose-service/result-public.png) + +## Expose Service Inside Cluster + +If you only need the application to be accessed inside the cluster, just configure `Public` as `False` in schema `Port`. There is no need to change the workspace, which means an application developer can easily change a service exposure range, without the involvement of platform engineers. + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: service.Service { + ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: False + } + ] + } + } +} +``` + +Execute `kusion apply --yes`, the generated Service has suffix `private`. + +![apply-private](/img/docs/user_docs/cloud-resources/expose-service/apply-private.png) + +And the Service type is `ClusterIP`, only has `CLUSTER_IP` and no `EXTERNAL_IP`, which means it cannot get accessed from outside the cluster. + +![k8s-resource-private](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png) + +## Summary +This tutorial demonstrates how to expose service of the application deployed on the CSP Kubernetes. By platform engineers' setup of workspace, and application developers' configuration of schema `Port` of Kusion module `network`, Kusion enables you expose service simply and efficiently. 
diff --git a/docs/kusion/5-user-guides/1-cloud-resources/_category_.json
new file mode 100644
index 00000000..f6f2c380
--- /dev/null
+++ b/docs/kusion/5-user-guides/1-cloud-resources/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Cloud Resources"
+}
diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/1-deploy-application.md
new file mode 100644
index 00000000..9793e2e7
--- /dev/null
+++ b/docs/kusion/5-user-guides/2-working-with-k8s/1-deploy-application.md
@@ -0,0 +1,282 @@
+---
+id: deploy-application
+---
+
+# Deploy Application
+
+This guide shows you how to use the Kusion CLI to complete the deployment of an application running in Kubernetes.
+We call the abstraction of application operation and maintenance configuration `AppConfiguration`, and an instance of it an `Application`.
+It is essentially a configuration model that describes an application. The complete definition can be seen [here](../../reference/modules/developer-schemas/app-configuration).
+
+In production, an application generally includes at least the following Kubernetes resources:
+
+- Namespace
+- Deployment
+- Service
+
+:::tip
+This guide requires you to have a basic understanding of Kubernetes.
+If you are not familiar with the relevant concepts, please refer to the links below:
+
+- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/)
+- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/)
+- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
+- [Service](https://kubernetes.io/docs/concepts/services-networking/service/)
+:::
+
+## Prerequisites
+
+Before we start, we need to complete the following steps:
+
+1、Install Kusion
+
+We recommend using Homebrew (Mac), Scoop (Windows), or the installation shell script to download and install Kusion.
+See [Download and Install](../../getting-started/install-kusion) for more details.
+
+2、A running Kubernetes cluster
+
+There must be a running and accessible Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool.
+If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own.
+
+## Initializing
+
+This guide deploys an application with Kusion, relying only on the Kusion CLI and an existing Kubernetes cluster.
+
+### Initializing workspace configuration
+
+In version 0.10.0, we introduced the concept of [workspaces](../../concepts/workspace), a logical layer whose configurations represent an opinionated set of defaults, often appointed by the platform team. In most cases, a workspace corresponds to an "environment" in traditional SDLC terms. Workspaces provide a means to separate the concerns of **application developers**, who wish to focus on business logic, from those of **platform engineers**, who wish to standardize the applications on the platform.
+
+Driven by the discipline of Platform Engineering, management of workspaces, including creating, updating, and deleting workspaces and their configurations, should be done by dedicated platform engineers in large software organizations to facilitate a more mature and scalable collaboration pattern.
+ +:::tip +More on the collaboration pattern can be found in the [design doc](https://github.com/KusionStack/kusion/blob/main/docs/design/collaboration/collaboration_paradigm.md). +::: + +However, if that does NOT apply to your scenario, e.g. if you work in a smaller org without platform engineers or if you are an individual developer, we wish Kusion can still be a value tool to have when delivering an application. In this guide, we are NOT distinctively highlighting the different roles or what the best practices entails (the design doc above has all that) but rather the steps needed to get Kusion tool to work. + +As of version 0.11.0, workspace configurations in Kusion can not only be managed on the local filesystem in the form of YAML files, but the remotely-managed workspaces have been supported as well. + +To initialize the workspace configuration: + +```bash +~/playground$ touch ~/dev.yaml +~/playground$ kusion workspace create dev -f ~/dev.yaml +create workspace dev successfully +``` + +To verify the workspace has been created properly: + +``` +~/playground$ kusion workspace list +- default +- dev +~/playground$ kusion workspace show dev +{} +``` + +Note that `show` command tells us the workspace configuration is currently empty, which is expected because we created the `dev` workspace with an empty YAML file. An empty workspace configuration will suffice in some cases, where no platform configurations are needed. + +Kusion by default uses the `default` workspace, thus we need to switch to the `dev` workspace we have just created. + +```bash +~/playground$ kusion workspace switch dev +``` + +We will progressively add more workspace configurations throughout this user guide. + +### Initializing application configuration + +Now that workspaces are properly initialized, we can begin by initializing the application configuration: + +```bash +# Create a new directory and navigate into it. +mkdir simple-service && cd simple-service + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The directory structure is as follows: + +```shell +simple-service/ +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +The project directory has the following files that are automatically generated: +- `project.yaml` represents project-level configurations. +- `dev` directory stores the customized stack configuration: + - `dev/main.k` stores configurations in the `dev` stack. + - `dev/stack.yaml` stores stack-level configurations. + - `dev/kcl.mod` stores stack-level dependencies. + +In general, the `.k` files are the KCL source code that represents the application configuration, and the `.yaml` is the static configuration file that describes behavior at the project or stack level. + +:::info +See [Project](../../concepts/project/overview) and [Stack](../../concepts/stack/overview) for more details about Project and Stack. +::: + +The `kusion init` command will create a demo quickstart application, we may update the `dev/kcl.mod` and `dev/main.k` later. + +#### kcl.mod +There should be a `kcl.mod` file generated automatically under the project directory. The `kcl.mod` file describes the dependency for the current project or stack. By default, it should contain a reference to the official [`kam` repository](https://github.com/KusionStack/kam) which holds the Kusion `AppConfiguration` and related workload model definitions that fits best practices. 
You can also create your own models library and reference that. + +You can change the package name in `kcl.mod` to `simple-service`: + +`dev/kcl.mod` +```shell +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +#### main.k +The configuration file `main.k`, usually written by the application developers, declare customized configurations for a specific stack, including an `Application` instance of `AppConfiguration` model. + +You can update the `main.k` as follows: + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +helloworld: ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "kusionstack/kusion-quickstart:latest" + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +## Previewing + +At this point, the project has been completely initialized. +The configuration is written in KCL, not JSON/YAML which Kubernetes recognizes, so it needs to be built to get the final output. And we can use the `kusion preview` cmd to preview the Kubernetes resources intended to deliver. + +Enter stack dir `simple-service/dev` and preview: + +```bash +cd simple-service/dev && kusion preview +``` + +:::tip +For instructions on the kusion command line tool, execute `kusion -h`, or refer to the tool's online [documentation](../../reference/commands). +::: + +## Applying + +Preview is now completed. We can apply the configuration as the next step. In the output from `kusion preview`, you can see 3 resources: + +- a Namespace named `simple-service` +- a Deployment named `simple-service-dev-helloworld` in the `simple-service` namespace +- a Service named `simple-service-dev-helloworld-private` in the `simple-service` namespace + +Execute command: + +```bash +kusion apply +``` + +The output is similar to: + +``` + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Create +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 3 created, 0 updated, 0 deleted. +``` + +After the configuration applying successfully, you can use the `kubectl` to check the actual status of these resources. + +1、 Check Namespace + +```bash +kubectl get ns +``` + +The output is similar to: + +``` +NAME STATUS AGE +default Active 117d +simple-service Active 38s +kube-system Active 117d +... 
+``` + +2、Check Deployment + +```bash +kubectl get deploy -n simple-service +``` + +The output is similar to: + +``` +NAME READY UP-TO-DATE AVAILABLE AGE +simple-service-dev-helloworld 1/1 1 1 59s +``` + +3、Check Service + +```bash +kubectl get svc -n simple-service +``` + +The output is similar to: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +simple-service-dev-helloworld-private ClusterIP 10.98.89.104 80/TCP 79s +``` + +4、Validate app + +Using the `kubectl` tool, forward native port `30000` to the service port `80`. + +```bash +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:80 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000): + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/10-customize-health-policy.md b/docs/kusion/5-user-guides/2-working-with-k8s/10-customize-health-policy.md new file mode 100644 index 00000000..8261953e --- /dev/null +++ b/docs/kusion/5-user-guides/2-working-with-k8s/10-customize-health-policy.md @@ -0,0 +1,194 @@ +--- +id: health-policy +--- + +# Customized Health Check with KCL + +Kusion now offers advanced customized health checks leveraging the power of `KCL`. This robust feature empowers users to define complex and tailored health policies for their Kubernetes resources. By implementing these custom policies, you can ensure that your resources not only meet specific criteria but also satisfy complex conditions before being deemed healthy. This capability is particularly valuable when assessing the health status of Kubernetes Custom Resources (CRs), providing a flexible and precise mechanism to validate the state of your entire `project`. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Defining a Health Policy + +You can define a health policy in the `Workspace` configuration via the `healthPolicy` field. The `healthPolicy` should contain a KCL script that defines the health check logic and can be used to assert healthy conditions on your `Kubernetes` resources. + +Firstly, you need to initialize the workspace configuration: + +```shell +~/$ touch ~/dev.yaml +~/$ kusion workspace create dev -f ~/dev.yaml +create workspace dev successfully +``` + +### Example Health Policy + +Here is an example of how to define a health policy for a Kubernetes Deployment. This policy checks multiple aspects of the Deployment's health status. Update ~/dev.yaml with this example: + +```yaml +modules: + service: + configs: + default: + healthPolicy: + health.kcl: | + assert res.metadata.generation == res.status.observedGeneration + assert res.status.replicas == 1 +``` + +In this example, the custom health check ensures that: + +1. The Deployment has 1 replicas +2. The observed generation matches the current generation (indicating that the latest changes have been processed) + +:::note +`res` represents the Kubernetes resource being evaluated. It's a fixed expression in this feature that provides access to all fields and properties of the resource. 
You can use dot notation (e.g., `res.metadata.name`) to access nested fields within the resource. This allows you to create complex health checks based on various aspects of your Kubernetes resources. +::: + +## How It Works + +When you apply your configuration, `kusion` will patch the provided `KCL` script into the `extension` field of the specified resource in the `Spec` and use the provided KCL script to evaluate the health of this resource. The health check will be performed repeatedly until it passes or a timeout is reached. + +The KCL script has access to the full Kubernetes resource object through the `res` variable. You can use any fields or properties of the resource in your health check logic. + +Besides configuring the workspace, platform engineers can also utilize the useful `PatchHealthPolicyToExtension` function in SDK to perform this feature while constructing the `module`. This function allows for a more programmatic and flexible approach to applying health policies while it's multiple resources case for a module. + +Here's a code snippet of how to use the `PatchHealthPolicyToExtension` function: + +```golang +// Generate Kusion resource ID and wrap the Kubernetes Service into Kusion resource +// with the SDK provided by kusion module framework. +resourceID := module.KubernetesResourceID(svc.TypeMeta, svc.ObjectMeta) +resource, err := module.WrapK8sResourceToKusionResource(resourceID, svc) +if err != nil { + return nil, err +} +module.PatchHealthPolicyToExtension(resource, "assert res.metadata.generation == res.status.observedGeneration") +``` + +## Applying the Health Policy + +To apply the health policy, update your workspace configuration: + +```shell +~/$ kusion workspace update dev -f ~/dev.yaml +update workspace dev successfully +``` + +After updating the workspace configuration, apply your new configuration with the customized health check with the following commands: + +```shell +~/$ cd quickstart/default +~/quickstart/default/$ kusion apply + ✔︎ Generating Spec in the Stack default... + +Stack: default +ID Action +v1:Namespace:quickstart Create +v1:Service:quickstart:quickstart-default-quickstart-private Create +apps/v1:Deployment:quickstart:quickstart-default-quickstart Create + + +Do you want to apply these diffs?: + > yes + +Start applying diffs ... + ✔︎ Succeeded v1:Namespace:quickstart + ⣽ Creating v1:Service:quickstart:quickstart-default-quickstart-private (0s) + ✔︎ Succeeded v1:Namespace:quickstart + ⢿ Creating v1:Service:quickstart:quickstart-default-quickstart-private (0s) + ⢿ Creating apps/v1:Deployment:quickstart:quickstart-default-quickstart (0s) + ...... + ✔︎ Succeeded v1:Namespace:quickstart + ✔︎ Succeeded v1:Service:quickstart:quickstart-default-quickstart-private + ✔︎ Succeeded apps/v1:Deployment:quickstart:quickstart-default-quickstart +Apply complete! Resources: 3 created, 0 updated, 0 deleted. 
+ +[v1:Namespace:quickstart] +Type Kind Name Detail +READY Namespace quickstart Phase: Active +READY ServiceAccount default Secrets: 0, Age: 0s +[v1:Service:quickstart:quickstart-default-quickstart-private] +Type Kind Name Detail +READY Service quickstart-default-quickstart-private Type: ClusterIP, InternalIP: 10.96.196.38, ExternalIP: , Port(s): 8080/TCP +READY EndpointSlice quickstart-default-quickstart-private-v42zc AddressType: IPv4, Ports: 8080, Endpoints: 10.244.1.99 +[apps/v1:Deployment:quickstart:quickstart-default-quickstart] +Type Kind Name Detail +READY Deployment quickstart-default-quickstart Reconciled +READY ReplicaSet quickstart-default-quickstart-67459cd68d Desired: 1, Current: 1, Ready: 1 +READY Pod quickstart-default-quickstart-67459cd68d-jqtt7 Ready: 1/1, Status: Running, Restart: 0, Age: 4s +``` + +## Explanation + +The `Detail` column for the Deployment `quickstart-default-quickstart` provides crucial information about the resource's reconciliation status: + +- If it shows "Reconciled", it means the resource has successfully met the conditions defined in the health policy. + +```shell +Type Kind Name Detail +READY Deployment quickstart-default-quickstart Reconciled +``` + +- If it displays "Reconciling...", it indicates that the resource is still in the process of reaching the desired state as per the health policy. + +```shell +Type Kind Name Detail +MODIFIED Deployment quickstart-default-quickstart Reconciling... +``` + +- In case of any errors or unsupported configurations, appropriate messages will be shown, and customized health check will be skipped. + +```shell +Type Kind Name Detail +READY Deployment quickstart-default-quickstart health policy err: invalid syntax error, skip +``` + +This `Detail` helps you quickly assess whether your Kubernetes resources have reached their intended state after applying changes. It's an essential feedback mechanism for ensuring the reliability and correctness of your deployments. + +:::note +`Detail` showing as `Unsupported kind, skip` indicates that the health policy is not configured for this resource's health check. This can occur due to several reasons: + +1. There's a mismatch between the resource kind in your Kubernetes manifests and the kinds specified in your health policy. +2. The health policy in your workspace configuration might be missing or incorrectly defined for this particular resource. +3. You might forgot to updated your workspace with the new configuration. + +To resolve this: + +1. Review your workspace configuration to ensure the health policy is correctly defined for all intended resource kinds. +2. Check that the resource kind in your Kubernetes manifests matches the kinds specified in your health policy. + +If the issue persists, you may need to update your workspace configuration or adjust your health policy to include the specific resource kind. +::: + +## Best Practices + +- Keep your health check logic simple and focused on key indicators of health for your specific resource. +- Use assertions to clearly define what conditions must be true for the resource to be considered healthy. +- Consider both the desired state (e.g., number of replicas) and the current state (e.g., available replicas) in your health checks. +- For complex resources, you may want to check multiple conditions to ensure full health and readiness. + +## Limitations + +- The customized health check feature is currently only available for Kubernetes resources. 
+- The KCL script must complete execution within a reasonable time to avoid timeouts during the apply process.
+- Errors in the KCL script syntax will cause the health check to be skipped, so be sure to test your scripts thoroughly.
+
+## Validation
+
+To verify the health policy, you can check the status of your Kubernetes resources:
+
+```bash
+kubectl get -n quickstart deployment quickstart-default-quickstart -o yaml
+```
+
+Ensure that the resource meets the conditions defined in your health policy.
+
+## Conclusion
+
+Customized health checks provide a powerful way to ensure your Kubernetes resources are in the desired state before an `apply` operation is considered complete. By defining health policies, you can automate the validation of your resources against specific criteria, and by leveraging KCL, you can create sophisticated health check logic tailored to your specific `project` needs.
+
+For more details on KCL and its syntax, refer to the [KCL documentation](../../4-configuration-walkthrough/2-kcl-basics.md).
diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/2-container.md b/docs/kusion/5-user-guides/2-working-with-k8s/2-container.md
new file mode 100644
index 00000000..eb51ec5e
--- /dev/null
+++ b/docs/kusion/5-user-guides/2-working-with-k8s/2-container.md
@@ -0,0 +1,146 @@
+---
+id: container
+---
+
+# Configure Containers
+
+You can manage container-level configurations in the `AppConfiguration` model via the `containers` field (under the `workload` schema). By default, everything defined in the `containers` field will be treated as application containers. Sidecar containers will be supported in a future version of kusion.
+
+For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details.
+
+## Prerequisites
+
+Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application.
+
+The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` commands, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory.
+
+## Managing Workspace Configuration
+
+In the last guide, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide; no changes are required there.
+
+However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`:
+
+```yaml
+modules:
+  service:
+    default:
+      replicas: 3
+      labels:
+        label-key: label-value
+      annotations:
+        annotation-key: annotation-value
+      type: CollaSet
+```
+
+Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration.
+
+The workspace configuration needs to be updated with the command:
+
+```bash
+kusion workspace update dev -f ~/dev.yaml
+```
+
+For a full reference of what can be configured at the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/service).
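+
+After updating, you can optionally confirm that the new defaults have been recorded by printing the stored workspace configuration; the workspace name `dev` follows the example above:
+
+```shell
+# Print the stored configuration of the 'dev' workspace
+kusion workspace show dev
+```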
+
+## Example
+
+`simple-service/dev/main.k`:
+```python
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import service.container.probe as p
+import network as n
+
+"helloworld": ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "helloworld": c.Container {
+                image = "gcr.io/google-samples/gb-frontend:v4"
+                env: {
+                    "env1": "VALUE"
+                    "env2": "VALUE2"
+                }
+                resources: {
+                    "cpu": "500m"
+                    "memory": "512Mi"
+                }
+                # Configure an HTTP readiness probe
+                readinessProbe: p.Probe {
+                    probeHandler: p.Http {
+                        url: "http://localhost:80"
+                    }
+                    initialDelaySeconds: 10
+                }
+            }
+        }
+        replicas: 2
+    }
+    accessories: {
+        "network": n.Network {
+            ports: [
+                n.Port {
+                    port: 80
+                }
+            ]
+        }
+    }
+}
+```
+
+## Apply
+
+Re-run the steps in [Applying](deploy-application#applying) to apply the new container configuration.
+
+```
+$ kusion apply
+ ✔︎ Generating Spec in the Stack dev...
+Stack: dev  ID                                                                   Action
+* ├─ v1:Namespace:simple-service                                                 UnChanged
+* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private             UnChanged
+* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld             Update
+
+
+? Do you want to apply these diffs? yes
+Start applying diffs ...
+ SUCCESS  UnChanged v1:Namespace:simple-service, skip
+ SUCCESS  UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip
+ SUCCESS  Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success
+Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ██████████████████████████████████████████████████ 100% | 0s
+Apply complete! Resources: 0 created, 1 updated, 0 deleted.
+```
+
+## Validation
+
+We can verify the container (in the deployment template) now has the updated attributes as defined in the container configuration:
+```
+$ kubectl get deployment -n simple-service -o yaml
+...
+  template:
+    ...
+    spec:
+      containers:
+      - env:
+        - name: env1
+          value: VALUE
+        - name: env2
+          value: VALUE2
+        image: gcr.io/google-samples/gb-frontend:v4
+        imagePullPolicy: IfNotPresent
+        name: helloworld
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            host: localhost
+            path: /
+            port: 80
+            scheme: HTTP
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 1
+        resources:
+          limits:
+            cpu: 500m
+            memory: 512Mi
+...
+```
\ No newline at end of file
diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/3-service.md b/docs/kusion/5-user-guides/2-working-with-k8s/3-service.md
new file mode 100644
index 00000000..f795430c
--- /dev/null
+++ b/docs/kusion/5-user-guides/2-working-with-k8s/3-service.md
@@ -0,0 +1,139 @@
+---
+id: service
+---
+
+# Expose Service
+
+You can determine how to expose your service in the `AppConfiguration` model via the `ports` field (under the `network` accessory). The `ports` field defines a list of all the `Port`s you want to expose for the application (and their corresponding listening ports on the container, if they don't match the service ports), so that the application can be consumed by other applications.
+
+Unless explicitly configured otherwise, each exposed port is private by default, backed by a `ClusterIP` type Service. You can expose a port publicly by setting the `public` field in the `Port` schema. At the moment, public access is implemented via a Load Balancer type Service backed by cloud providers. Ingress will be supported in a future version of kusion.
+ +For the `Port` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-port) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the services to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + kusionstack/network@0.1.0: + default: + port: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/networking/network). + +## Example + +`simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + targetPort: 80 + } + ] + } + } +} +``` + +The code above changes the service port to expose from `80` in the last guide to `8080`, but still targeting the container port `80` because that's what the application is listening on. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), new service configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Update +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld UnChanged + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Update v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip +UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip [3/3] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. 
+``` + +## Validation + +We can verify the Kubernetes service now has the updated attributes (mapping service port 8080 to container port 80) as defined in the `ports` configuration: + +``` +kubectl get svc -n simple-service -o yaml +... + spec: + ... + ports: + - name: simple-service-dev-helloworld-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 80 +... +``` + +Exposing service port 8080: +``` +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:8080 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000), the application should be up and running: + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) \ No newline at end of file diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/4-image-upgrade.md b/docs/kusion/5-user-guides/2-working-with-k8s/4-image-upgrade.md new file mode 100644 index 00000000..ccee54e0 --- /dev/null +++ b/docs/kusion/5-user-guides/2-working-with-k8s/4-image-upgrade.md @@ -0,0 +1,78 @@ +--- +id: image-upgrade +--- + +# Upgrade Image + +You can declare the application's container image via `image` field of the `Container` schema. + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the image value in `simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.nginx: { + ... + # before: + # image = "gcr.io/google-samples/gb-frontend:v4" + # after: + image = "gcr.io/google-samples/gb-frontend:v5" + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), update image is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. 
+``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated image (v5) as defined in the container configuration: +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... +... +``` \ No newline at end of file diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/5-resource-spec.md b/docs/kusion/5-user-guides/2-working-with-k8s/5-resource-spec.md new file mode 100644 index 00000000..1e5f208a --- /dev/null +++ b/docs/kusion/5-user-guides/2-working-with-k8s/5-resource-spec.md @@ -0,0 +1,90 @@ +--- +id: resource-spec +--- + +# Configure Resource Specification + +You can manage container-level resource specification in the `AppConfiguration` model via the `resources` field (under the `Container` schema). + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the resources value in `simple-service/dev/main.k`: + +```py +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.helloworld: { + ... + # before: + # resources: { + # "cpu": "500m" + # "memory": "512M" + # } + # after: + resources: { + "cpu": "250m" + "memory": "256Mi" + } + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated resources attributes (cpu:250m, memory:256Mi) as defined in the container configuration: + +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... 
+        resources:
+          limits:
+            cpu: 250m
+            memory: 256Mi
+...
+```
\ No newline at end of file
diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md b/docs/kusion/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md
new file mode 100644
index 00000000..915f84c4
--- /dev/null
+++ b/docs/kusion/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md
@@ -0,0 +1,86 @@
+---
+id: set-up-operational-rules
+---
+
+# Set up Operational Rules
+
+You can set up operational rules in the `AppConfiguration` model with the `opsrule` accessory and corresponding platform configurations in the workspace directory. The `opsrule` is the collection of operational rule requirements for the application that are used as a preemptive measure to police and stop any unwanted changes.
+
+## Prerequisites
+
+Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application.
+
+The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` commands, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory.
+
+## Managing Workspace Configuration
+
+In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide; no changes are required there.
+
+However, if you (or the platform team) would like to set default values for the opsrule to standardize the behavior of applications, you can do so by updating the `~/dev.yaml`.
+Note that if the platform engineers set the default workload type to [CollaSet](https://github.com/KusionStack/operating) and have installed the KusionStack Operating controllers properly, the `opsrule` module will generate a [PodTransitionRule](https://www.kusionstack.io/docs/operating/manuals/podtransitionrule) instead of updating the `maxUnavailable` value in the Deployment:
+
+```yaml
+modules:
+  service:
+    default:
+      type: CollaSet
+  kusionstack/opsrule@0.1.0:
+    default:
+      maxUnavailable: 30%
+```
+
+Please note that the `maxUnavailable` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration.
+
+The workspace configuration needs to be updated with the command:
+
+```bash
+kusion workspace update dev -f ~/dev.yaml
+```
+
+## Example
+
+Add the `opsrule` module dependency to `kcl.mod`:
+
+```shell
+[package]
+name = "simple-service"
+version = "0.1.0"
+
+[dependencies]
+kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" }
+service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" }
+network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" }
+opsrule = { oci = "oci://ghcr.io/kusionstack/opsrule", tag = "0.1.0" }
+
+[profile]
+entries = ["main.k"]
+```
+
+Add the `opsrule` snippet to the `AppConfiguration` in `simple-service/dev/main.k`:
+
+```py
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import opsrule
+
+helloworld: ac.AppConfiguration {
+    workload: service.Service {
+        ...
+    }
+    # Configure the maxUnavailable rule
+    accessories: {
+        "opsrule": opsrule.OpsRule {
+            "maxUnavailable": "50%"
+        }
+    }
+}
+```
+
+## Applying
+
+Re-run the steps in [Applying](deploy-application#applying) to roll out the new operational rule configuration.
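+
+If the workload type in the workspace is `CollaSet` and the KusionStack Operating controllers are installed, the apply should create a PodTransitionRule alongside the workload. A quick way to confirm this is sketched below; the resource kind assumes the Operating CRDs are present in the target cluster:
+
+```shell
+# List the PodTransitionRule generated by the opsrule module
+# (assumes the KusionStack Operating CRDs are installed in the cluster)
+kubectl get podtransitionrules -n simple-service
+```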
+ +## Validation + +We can verify the application deployment strategy now has the updated attributes `maxUnavailable: 50%` in the container configuration. diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/7-job.md b/docs/kusion/5-user-guides/2-working-with-k8s/7-job.md new file mode 100644 index 00000000..29a4466d --- /dev/null +++ b/docs/kusion/5-user-guides/2-working-with-k8s/7-job.md @@ -0,0 +1,146 @@ +--- +id: job +--- + +# Schedule a Job + +The guides above provide examples on how to configure workloads of the type `service.Service`, which is typically used for long-running web applications that should **never** go down. Alternatively, you could also schedule another kind of workload profile, namely `wl.Job` which corresponds to a one-off or recurring execution of tasks that run to completion and then stop. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for scheduling a job. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/job). + +## Example + +To schedule a job with cron expression, update `simple-service/dev/kcl.mod` and `simple-service/dev/main.k` to the following: + +`simple-service/dev/kcl.mod`: +```py +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +job = { oci = "oci://ghcr.io/kusionstack/job", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +`simple-service/dev/main.k`: +```py +import kam.v1.app_configuration as ac +import job +import job.container as c + +helloworld: ac.AppConfiguration { + workload: job.Job { + containers: { + "busybox": c.Container { + # The target image + image: "busybox:1.28" + # Run the following command as defined + command: ["/bin/sh", "-c", "echo hello"] + } + } + # Run every minute. + schedule: "* * * * *" + } +} +``` + +The KCL snippet above schedules a job. 
Alternatively, if you want a one-time job without cron, simply remove the `schedule` from the configuration. + +You can find the full example in here in the [konfig repo](https://github.com/KusionStack/konfig/tree/main/example/simple-job). + +## Applying + +Re-run steps in [Applying](deploy-application#applying) and schedule the job. Your output might look like one of the following: + +If you are starting from scratch, all resources are created on the spot: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* └─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create batch/v1:CronJob:simple-service:helloworld-dev-helloworld success +Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success [2/2] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 2 created, 0 updated, 0 deleted. +``` + +If you are starting from the last guide which configures an `opsrule`, the output looks like the following which destroys the `Deployment` and `Service` and replace it with a `CronJob`: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create +* ├─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Delete +* └─ v1:Service:simple-service:simple-service-dev-helloworld-private Delete + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Delete apps/v1:Deployment:simple-service:simple-service-dev-helloworld success + SUCCESS Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success + SUCCESS Delete v1:Service:simple-service:simple-service-dev-helloworld-private success +Delete v1:Service:simple-service:simple-service-dev-helloworld-private success [4/4] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 1 created, 0 updated, 2 deleted. 
+``` + +## Validation + +We can verify the job has now been scheduled: + +```shell +$ kubectl get cronjob -n simple-service +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +simple-service-dev-helloworld * * * * * False 0 2m18s +``` + +Verify the job has been triggered after the minute mark since we scheduled it to run every minute: +```shell +$ kubectl get job -n simple-service +NAME COMPLETIONS DURATION AGE +simple-service-dev-helloworld-28415748 1/1 5s 11s +``` diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/9-k8s-manifest.md b/docs/kusion/5-user-guides/2-working-with-k8s/9-k8s-manifest.md new file mode 100644 index 00000000..b706c71a --- /dev/null +++ b/docs/kusion/5-user-guides/2-working-with-k8s/9-k8s-manifest.md @@ -0,0 +1,208 @@ +--- +id: k8s-manifest +--- + +# Apply the Raw K8s Manifest YAML + +The guides above provide examples on how to configure workloads and accessories with KCL, and generate the related Kubernetes resources with Kusion Module generators, which is the usage method we recommend, as it can achieve the separation of concerns between developers and platform engineers, reducing the cognitive burden on developers. + +However, in some specific scenario, users may also have the need to directly use Kusion to apply and manage the raw Kubernetes manifest YAML files, such as taking over some existing resources and deploying CRD (CustomResourceDefinition), or other special resources. + +To help users directly apply raw K8s manifests, the KusionStack community has provided the [k8s_manifest](../../6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md) Kusion Module. + +:::info +The module definition and implementation, as well as the example can be found at [here](https://github.com/KusionStack/catalog/tree/main/modules/k8s_manifest). +::: + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create`, `kusion project create`, `kusion stack create` command, which will create a workspace and project, and also generate a [kcl.mod](deploy-application#kclmod) file under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either. + +However, if you (or the platform team) would like to set some default paths for the raw K8s manifest YAML files to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `dev.yaml` with the following config block: + +```yaml +modules: + k8s_manifest: + path: oci://ghcr.io/kusionstack/k8s_manifest + version: 0.1.0 + configs: + default: + # The default paths to apply for the raw K8s manifest YAML files. + paths: + - /path/to/k8s_manifest.yaml + - /dir/to/k8s_manifest/ +``` + +Please note that the `paths` decalred by the platform engineers in the workspace configs will be merged with the ones declared by the developers in the `AppConfiguration` in `main.k`. 
+ +The workspace configuration needs to be updated with the command: + +```bash +kusion workspace update dev -f dev.yaml +``` + +## Example + +To apply the specified raw K8s manifest YAML files with `k8s_manifest` module, please use the `v0.2.1` version of `kam`, whose `workload` is no longer a required field in the `AppConfiguration` model. An example is shown below: + +`kcl.mod`: +```py +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "v0.2.1" } +k8s_manifest = { oci = "oci://ghcr.io/kusionstack/k8s_manifest", tag = "0.1.0" } +``` + +`stack.yaml`: +```yaml +# Generate a specified namespace +name: dev +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` + +`main.k`: +```py +import kam.v1.app_configuration as ac +import k8s_manifest + +test: ac.AppConfiguration { + accessories: { + "k8s_manifests": k8s_manifest.K8sManifest { + paths: [ + # The `test.yaml` should be placed under the stack directory, + # as it is declared using a relative path. + "./test.yaml" + ] + } + } +} +``` + +`test.yaml`: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: test + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 +``` + +## Generate and Applying + +Execute the `kusion generate` command, the `Deployment` in the `test.yaml` will be generated into a Kusion `Resource` with a Kusion ID in the `Spec`. + +``` +➜ dev git:(main) ✗ kusion generate + ✔︎ Generating Spec in the Stack dev... +resources: + - id: v1:Namespace:test + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: test + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:test:nginx-deployment + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + app: nginx + name: nginx-deployment + namespace: test + spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.14.2 + name: nginx + ports: + - containerPort: 80 + dependsOn: + - v1:Namespace:test +secretStore: null +context: {} +``` + +Execute the `kusion apply` command, you may get the output like the following: + +``` +➜ dev git:(main) ✗ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev +ID Action +v1:Namespace:test Create +apps/v1:Deployment:test:nginx-deployment Create + + +Do you want to apply these diffs?: + > yes + +Start applying diffs ... + ✔︎ Succeeded v1:Namespace:test + ✔︎ Succeeded apps/v1:Deployment:test:nginx-deployment +Apply complete! Resources: 2 created, 0 updated, 0 deleted. 
+ +[v1:Namespace:test] +Type Kind Name Detail +READY Namespace test Phase: Active +[apps/v1:Deployment:test:nginx-deployment] +Type Kind Name Detail +READY Deployment nginx-deployment Ready: 3/3, Up-to-date: 3, Available: 3 +READY ReplicaSet nginx-deployment-7fb96c846b Desired: 3, Current: 3, Ready: 3 +READY Pod nginx-deployment-7fb96c846b-d9pp4 Ready: 1/1, Status: Running, Restart: 0, Age: 2s +``` + +## Validation + +We can verify the `Deployment` and `Pod` we have just applied: + +```shell +➜ dev git:(main) ✗ kubectl get deployment -n test +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 70s +➜ dev git:(main) ✗ kubectl get pod -n test +NAME READY STATUS RESTARTS AGE +nginx-deployment-7fb96c846b-d9pp4 1/1 Running 0 87s +nginx-deployment-7fb96c846b-j45nt 1/1 Running 0 87s +nginx-deployment-7fb96c846b-tnz5f 1/1 Running 0 87s +``` diff --git a/docs/kusion/5-user-guides/2-working-with-k8s/_category_.json b/docs/kusion/5-user-guides/2-working-with-k8s/_category_.json new file mode 100644 index 00000000..79d3c6c5 --- /dev/null +++ b/docs/kusion/5-user-guides/2-working-with-k8s/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kubernetes" +} diff --git a/docs/kusion/5-user-guides/3-observability/1-prometheus.md b/docs/kusion/5-user-guides/3-observability/1-prometheus.md new file mode 100644 index 00000000..d67141de --- /dev/null +++ b/docs/kusion/5-user-guides/3-observability/1-prometheus.md @@ -0,0 +1,327 @@ +--- +id: prometheus +--- + +# Configure Monitoring Behavior With Prometheus + +This document provides the step-by-step instruction to set up monitoring for your application. + +As of today, Kusion supports the configuration of Prometheus scraping behaviors for the target application. In the future, we will add more cloud-provider-native solutions, such as AWS CloudWatch, Azure Monitor, etc. + +The user guide below is composed of the following components: + +- Namespace +- Deployment +- Service +- ServiceMonitor + +:::tip + +This guide requires you to have a basic understanding of Kubernetes and Prometheus. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/) +::: + +## Pre-requisite +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +## Setting up your own Prometheus + +There a quite a few ways to set up Prometheus in your cluster: +1. Installing a Prometheus operator +2. Installing a standalone Prometheus server +3. Installing a Prometheus agent and connect to a remote Prometheus server + +[The advice from the Prometheus team](https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-401092041) is to use the `ServiceMonitor` or `PodMonitor` CRs via the Prometheus operator to manage scrape configs going forward[2]. + +In either case, you only have to do this setup once per cluster. This doc will use a minikube cluster and Prometheus operator as an example. + +### Installing Prometheus operator[3]. +To get the example in this user guide working, all you need is a running Prometheus operator. 
You can have that installed by running: +``` +LATEST=$(curl -s https://api.github.com/repos/prometheus-operator/prometheus-operator/releases/latest | jq -cr .tag_name) +curl -sL https://github.com/prometheus-operator/prometheus-operator/releases/download/${LATEST}/bundle.yaml | kubectl create -f - +``` + +This will install all the necessary CRDs and the Prometheus operator itself in the default namespace. Wait a few minutes, you can confirm the operator is up by running: +``` +kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator -n default +``` + +### Make sure RBAC is properly set up +If you have RBAC enabled on the cluster, the following must be created for Prometheus to work properly: +``` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: default +``` + +### Configure Prometheus instance via the operator +Once all of the above is set up, you can then configure the Prometheus instance via the operator: +``` +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: prometheus +spec: + serviceAccountName: prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + resources: + requests: + memory: 400Mi +``` +This Prometheus instance above will be cluster-wide, picking up ALL the service monitors and pod monitors across ALL the namespaces. +You can adjust the requests and limits accordingly if you have a larger cluster. + +### Exposing the Prometheus portal (optional) +Once you have the managed Prometheus instance created via the Prometheus CR above, you should be able to see a service created called `prometheus-operated`: + +![prometheus-operated](/img/docs/user_docs/guides/prometheus/prometheus-operated.png) + +If you are also running on minikube, you can expose it onto your localhost via kubectl: +``` +kubectl port-forward svc/prometheus-operated 9099:9090 +``` + +You should then be able to see the Prometheus portal via `localhost:9099` in your browser: + +![prometheus-portal](/img/docs/user_docs/guides/prometheus/prometheus-portal.png) + +If you are running a non-local cluster, you can try to expose it via another way, through an ingress controller for example. + +## Setting up workspace configs + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. + +In the case of setting up Prometheus, there are a few things to set up on the workspace level: + +### Operator mode + +The `operatorMode` flag indicates to Kusion whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. 
This determines the different kinds of resources Kusion manages.
+
+To see more about different ways to run Prometheus in the Kubernetes cluster, please refer to the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md#prometheus-installation).
+
+Most cloud vendors provide out-of-the-box monitoring solutions for workloads running in a managed Kubernetes cluster (EKS, AKS, etc.), such as AWS CloudWatch, Azure Monitor, etc. These solutions mostly involve installing an agent (CloudWatch Agent, OMS Agent, etc.) in the cluster and collecting the metrics to a centralized monitoring server. In those cases, you don't need to set `operatorMode` to `True`. It only needs to be set to `True` when you have an installation of the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) running inside the Kubernetes cluster.
+
+:::info
+
+For differences between [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) and the [community kube-prometheus-stack helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), the details are documented [here](https://github.com/prometheus-operator/prometheus-operator#prometheus-operator-vs-kube-prometheus-vs-community-helm-chart).
+:::
+
+### Monitor types
+
+The `monitorType` flag indicates the kind of monitor Kusion will create. It only applies when `operatorMode` is set to `True`. As of version 0.10.0, Kusion provides options to scrape metrics from either the application pods or their corresponding Kubernetes services. This determines the different kinds of resources Kusion manages when Prometheus runs as an operator in the target cluster.
+
+A sample `workspace.yaml` with Prometheus settings:
+```yaml
+modules:
+  ...
+  kusionstack/monitoring@0.1.0:
+    default:
+      operatorMode: True
+      monitorType: Service
+      scheme: http
+      interval: 30s
+      timeout: 15s
+...
+```
+
+To instruct Prometheus to scrape from pod targets instead:
+```yaml
+modules:
+  ...
+  kusionstack/monitoring@0.1.0:
+    default:
+      operatorMode: True
+      monitorType: Pod
+      scheme: http
+      interval: 30s
+      timeout: 15s
+...
+```
+
+If the `operatorMode` is omitted from the `workspace.yaml`, Kusion defaults `operatorMode` to `false`.
+
+### Overriding with projectSelector
+
+Workspace configurations contain a default settings group that applies to all projects in the workspace, with the means to override it for specific projects using the `projectSelector` keyword.
+
+Projects whose names match those in `projectSelector` will use the values defined in that override group instead of the defaults. If a key is not present in the override group, the default value will be used.
+
+Take a look at the sample `workspace.yaml`:
+```yaml
+modules:
+  ...
+  kusionstack/monitoring@0.1.0:
+    default:
+      operatorMode: True
+      monitorType: Pod
+      scheme: http
+      interval: 30s
+      timeout: 15s
+    low_frequency:
+      operatorMode: False
+      interval: 2m
+      projectSelector:
+      - foobar
+    high_frequency:
+      monitorType: Service
+      projectSelector:
+      - helloworld
+...
+```
+
+In the example above, a project named `foobar` (matching the `low_frequency` group) will have `operatorMode` set to `False`, a 2-minute scraping interval, a 15-second timeout (coming from the default) and the http scheme (coming from the default); a project named `helloworld` (matching the `high_frequency` group) will use the `Service` monitor type while inheriting everything else from the default group.
+ +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/monitoring/prometheus). + +## Updating the workspace config + +Assuming you now have a `workspace.yaml` that looks like the following: +```yaml +modules: + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Service + scheme: http + interval: 30s + timeout: 15s +... +``` + +Update the workspace configuration by running the following command: +``` +kusion workspace update dev -f workspace.yaml +``` +Verify the workspace config is properly updated by running the command: +``` +kusion workspace show dev +``` + +## Using kusion to deploy your application with monitoring requirements + +At this point we are set up for good! Any new applications you deploy via kusion will now automatically have the monitoring-related resources created, should you declare you want it via the `monitoring` field in the `AppConfiguration` model. + +The monitoring in an AppConfiguration is declared in the `monitoring` field. See the example below for a full, deployable AppConfiguration. + +Please note we are using a new image `quay.io/brancz/prometheus-example-app` since the app itself need to expose metrics for Prometheus to scrape: + +`helloworld/dev/kcl.mod`: +``` +[package] +name = "helloworld" + +[dependencies] +monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.2.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +`helloworld/dev/main.k`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import monitoring as m +import network.network as n + +helloworld: ac.AppConfiguration { + workload: service.Service { + containers: { + "monitoring-sample-app": c.Container { + image: "quay.io/brancz/prometheus-example-app:v0.3.0" + } + } + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/metrics" + } + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + } +} +``` + +The KCL file above represents an application with a service type workload, exposing the port 8080, and would like Prometheus to scrape the `/metrics` endpoint every 2 minutes. + +Running `kusion apply` would show that kusion will create a `Namespace`, a `Deployment`, a `Service` and a `ServiceMonitor`: +![kusion-apply-with-monitor](/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png) + +Continue applying all resources: +![kusion-apply-success](/img/docs/user_docs/guides/prometheus/kusion-apply-success.png) + +If we want to, we can verify the service monitor has been created successfully: +![service-monitor](/img/docs/user_docs/guides/prometheus/service-monitor.png) + +In a few seconds, you should be able to see in the Prometheus portal that the service we just deployed has now been discovered and monitored by Prometheus: +![prometheus-targets](/img/docs/user_docs/guides/prometheus/prometheus-targets.png) + +You can run a few simply queries for the data that Prometheus scraped from your application: +![prometheus-simple-query](/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png) + +For more info about PromQL, you can find them [here](https://prometheus.io/docs/prometheus/latest/querying/basics/)[4]. + +## References +1. 
Prometheus: https://prometheus.io/docs/introduction/overview/ +2. Prometheus team advise: https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-446691500 +3. Prometheus operator getting started doc: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md +4. PromQL basics: https://prometheus.io/docs/prometheus/latest/querying/basics/ \ No newline at end of file diff --git a/docs/kusion/5-user-guides/3-observability/2-resource-graph.md b/docs/kusion/5-user-guides/3-observability/2-resource-graph.md new file mode 100644 index 00000000..1d169d6c --- /dev/null +++ b/docs/kusion/5-user-guides/3-observability/2-resource-graph.md @@ -0,0 +1,88 @@ +--- +id: resource-graph +--- + +# Resource Graph + +Kusion provides a powerful feature to visualize the relationships and dependencies between kusion `resources` using a resource graph. This feature offers several key benefits: + +- Comprehensive Visualization: The resource graph offers a clear, visual representation of your entire infrastructure, allowing you to see all resources and their interconnections at a glance. It includes detailed information about each cloud resource, such as its type, name, and unique identifiers, making it easier to locate and manage resources in your cloud environment. + +- Dependency Tracking: It helps you understand how resources are linked, making it easier to identify potential impacts when making changes to your infrastructure. + +- Troubleshooting Aid: When issues arise during the `apply` operation, the resource graph becomes an invaluable tool for pinpointing the source of problems. It provides a clear visual representation of resource relationships and their current status. This comprehensive view significantly reduces debugging time and enhances your ability to maintain a stable and efficient infrastructure. + +- Visual Documentation: The resource graph provides a clear, up-to-date visual representation of your infrastructure. It automatically updates as changes occur,providing a clear and current representation of your resource +landscape. It improves team understanding and communication about the infrastructure setup. + +This feature empowers you to gain a comprehensive and intuitive understanding of your infrastructure's architecture, enabling more efficient troubleshooting and decision-making. + +## Prerequisites + +Please refer to the [Deliver the WordPress Application with Cloud RDS](../1-cloud-resources/1-database.md) in the guide for deploying an application. + +This guide will assume that you have already deployed an application following the guide. + +## Display Resource Graph + +To display a resource graph, you need to run the following command with project name specified: + +```bash +kusion resource graph --project wordpress-rds-cloud +``` + +The output will be a resource graph in the terminal: + +```shell +Displaying resource graph in the project wordpress-rds-cloud... 
+ +Workspace: demo + +Workload Resources: +ID Kind Name CloudResourceID Status +apps/v1:Deployment:wordpress-rds-cloud:wordpress-rds-cl Kubernetes:apps/v1:Deployment wordpress-rds-cloud/wordpress- Apply succeeded | Reconciled +oud-dev-wordpress rds-cloud-dev-wordpress + +Dependency Resources: +ID Kind Name CloudResourceID Status +v1:Secret:wordpress-rds-cloud:wordpress-mysql-mysql Kubernetes:v1:Secret wordpress-rds-cloud/wordpress- Apply succeeded | Reconciled + mysql-mysql +v1:Service:wordpress-rds-cloud:wordpress-rds-cloud-dev- Kubernetes:v1:Service wordpress-rds-cloud/wordpress- Apply succeeded | Reconciled +wordpress-private rds-cloud-dev-wordpress-privat + e +v1:Namespace:wordpress-rds-cloud Kubernetes:v1:Namespace wordpress-rds-cloud Apply succeeded | Reconciled + +Other Resources: +ID Kind Name CloudResourceID Status +aliyun:alicloud:alicloud_db_connection:wordpress-mysql alicloud:alicloud_db_connectio wordpress-mysql rm-2zer0f93xy490fdzq:rm-2zer0f Apply succeeded | Reconciled + n 93xy490fdzqtf +aliyun:alicloud:alicloud_db_instance:wordpress-mysql alicloud:alicloud_db_instance wordpress-mysql rm-2zer0f93xy490fdzq Apply succeeded | Reconciled +aliyun:alicloud:alicloud_rds_account:wordpress-mysql alicloud:alicloud_rds_account wordpress-mysql rm-2zer0f93xy490fdzq:root Apply succeeded | Reconciled +hashicorp:random:random_password:wordpress-mysql-mysql custom:random_password Apply succeeded +``` + +The resource graph output provides a comprehensive overview of the resources in your project. Let's break down each field: + +- ID: This is a unique identifier for each resource. + +- Kind: This field indicates the type of resource. + +- Name: This is the name of the resource within its namespace or scope. + +- CloudResourceID: For cloud resources, this field shows the unique identifier assigned by the cloud provider. For Kubernetes resources, this field is often empty. + +- Status: This field shows the current state of the resource. Common statuses include: + - "Apply succeeded | Reconciled": The resource has been successfully created and is in sync with the desired state. + - "Apply succeeded | Reconcile failed": The resource has been successfully created, but the resource is not in sync with the desired state. + - "Apply succeeded": The `apply` operation has completed, but the resource might not in sync with the desired state. + - "Apply failed": The `apply` operation has failed. + +The graph is divided into three sections: + +1. Workload Resources: These are the main application components, such as Kubernetes Deployments. + +2. Dependency Resources: These are resources that the workload depends on, such as Kubernetes Secrets, Services, and Namespaces. + +3. Other Resources: This section includes additional resources, often cloud provider-specific, such as database instances and connections. + +This graph gives you a clear view of all the resources in your project, their types, names, cloud identifiers (if applicable), and current status. It's particularly useful for understanding the structure of your application and its dependencies, as well as for troubleshooting and ensuring all resources are in the expected state. 
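+
+When a resource shows a status such as "Apply succeeded | Reconcile failed", the graph tells you where to start digging. The sketch below uses standard `kubectl` commands against the Deployment from the example output above; the resource and namespace names are taken from that output and may differ in your project:
+
+```shell
+# Inspect the workload that failed to reconcile
+kubectl describe deployment wordpress-rds-cloud-dev-wordpress -n wordpress-rds-cloud
+
+# Check recent events in the namespace for error details
+kubectl get events -n wordpress-rds-cloud --sort-by=.lastTimestamp
+```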
diff --git a/docs/kusion/5-user-guides/3-observability/_category_.json b/docs/kusion/5-user-guides/3-observability/_category_.json new file mode 100644 index 00000000..b061ae3e --- /dev/null +++ b/docs/kusion/5-user-guides/3-observability/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Automated Observability" +} diff --git a/docs/kusion/5-user-guides/4-secrets-management/1-using-cloud-secrets.md b/docs/kusion/5-user-guides/4-secrets-management/1-using-cloud-secrets.md new file mode 100644 index 00000000..2eb46f74 --- /dev/null +++ b/docs/kusion/5-user-guides/4-secrets-management/1-using-cloud-secrets.md @@ -0,0 +1,101 @@ +--- +id: using-cloud-secrets +--- + +# Using Cloud Secrets Manager + +Applications usually store sensitive data in secrets by using centralized secrets management solutions. For example, you authenticate databases, services, and external systems with passwords, API keys, tokens, and other credentials stored in a secret store, e.g. Hashicorp Vault, AWS Secrets Manager, Azure Key Vault, etc + +Kusion provides out-of-the-box support to reference existing external secrets management solution, this tutorial introduces that how to pull the secret from AWS Secrets Manager to make it available to applications. + +## Prerequisites + +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +Additionally, you also need to configure the obtained AccessKey and SecretKey as environment variables: + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +## Setting up workspace + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. + +In the case of setting up cloud secrets manager, platform teams need to specify which secrets management solution to use and necessary information to access on the workspace level. + +A sample `workspace.yaml` with AWS Secrets Manager settings: + +``` +modules: + ... +secretStore: + provider: + aws: + region: us-east-1 + profile: The optional profile to be used to interact with AWS Secrets Manager. +... +``` + +:::note +The `provider` of the `secretStore` now supports `aws`, `alicloud` and `viettelcloud`. +::: + +## Update AppConfiguration + +At this point we are set up for good! Now you can declare external type of secrets via the `secrets` field in the `AppConfiguration` model to consume sensitive data stored in AWS Secrets Manager. + +See the example below for a full, deployable AppConfiguration. 
+ +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +gitsync: ac.AppConfiguration { + workload: service.Service { + containers: { + "syncer": c.Container { + image: "dyrnq/git-sync" + # Run the following command as defined + command: [ + "--repo=https://github.com/KusionStack/kusion" + "--ref=HEAD" + "--root=/mnt/git" + ] + # Consume secrets in environment variables + env: { + "GIT_SYNC_USERNAME": "secret://git-auth/username" + "GIT_SYNC_PASSWORD": "secret://git-auth/password" + } + } + } + # Secrets used to retrieve secret data from AWS Secrets Manager + secrets: { + "git-auth": sec.Secret { + type: "external" + data: { + "username": "ref://git-auth-info/username" + "password": "ref://git-auth-info/password" + } + } + } + } +} +``` + +## Apply and Verify + +Run `kusion apply` command to deploy above application, then use the below command to verify if the secret got deployed: + +``` +kubectl get secret -n secretdemo +``` + +You will find `git-auth` of type Opaque automatically created and contains sensitive information pulled from AWS Secrets Manager. \ No newline at end of file diff --git a/docs/kusion/5-user-guides/4-secrets-management/_category_.json b/docs/kusion/5-user-guides/4-secrets-management/_category_.json new file mode 100644 index 00000000..8990c11b --- /dev/null +++ b/docs/kusion/5-user-guides/4-secrets-management/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Secrets Management" +} diff --git a/docs/kusion/5-user-guides/5-production-practice-case/1-production-practice-case.md b/docs/kusion/5-user-guides/5-production-practice-case/1-production-practice-case.md new file mode 100644 index 00000000..52d5f07e --- /dev/null +++ b/docs/kusion/5-user-guides/5-production-practice-case/1-production-practice-case.md @@ -0,0 +1,190 @@ +--- +id: collaborate-with-github-actions +--- + +# Achieving Team Collaboration in Production Practice with GitHub Actions + +In this article, we will introduce how to use Kusion CLI in combination with GitHub Actions to achieve team collaboration in production practice. + +Adopting the concept of separation of concerns, we divide the staff involved in application delivery and operation into two groups: **Platform Engineers (PEs)** and **Developers (Devs)**. As the builders of the Internal Developer Platform (IDP), platform engineers are primarily responsible for creating the [storage backend](../../3-concepts/7-backend.md) for the Kusion CLI in team collaborative scenarios (e.g. AWS S3 or Alicloud OSS), developing custom reusable [Kusion modules](../../3-concepts/3-module/1-overview.md), and creating and maintaining standardized platform configurations in [workspace](../../3-concepts/4-workspace.md). While application developers can focus on writing the application business logic and the configuration codes, self-serving the application delivery and operation by triggering the automated CI/CD pipelines. [GitHub Actions](https://github.com/features/actions) is such a CI/CD platform, and by customizing [GitHub Actions workflow](https://docs.github.com/en/actions/using-workflows), the pipeline such as building, testing, and deploying will be executed automatically. + +In the following sections, we will demonstrate the specific workflow from the perspectives of both PEs and Devs with the sample workflows from our [konfg](https://github.com/KusionStack/konfig) and [catalog](https://github.com/KusionStack/catalog) repository. 
+
+## Perspective of PE
+
+### Setup Kusion Storage Backend
+
+In order to enable multiple people to collaboratively edit and modify application configuration code within a team, PEs need to create a centralized remote storage backend for the Kusion CLI, such as [AWS S3](https://aws.amazon.com/pm/serv-s3/) or [Alicloud OSS](https://www.alibabacloud.com/en/product/object-storage-service). Below is an example OSS bucket; we can see that it is mainly used to store application **releases** and **workspace** configurations.
+
+![alicloud oss bucket for storage backend](/img/docs/user_docs/guides/github-actions/alicloud_oss_storage_backend.png)
+
+Suppose the PEs have set up the Alicloud OSS storage backend and obtained the AK/SK with permission to read and write the bucket; they can then use the following commands to set up the remote storage backend.
+
+```shell
+# please replace the env with actual AK/SK
+export OSS_ACCESS_KEY_ID=LTAxxxxxxxxxxxxxx
+export OSS_ACCESS_KEY_SECRET=uUPxxxxxxxxxx
+
+# set up backend
+kusion config set backends.oss_test '{"type":"oss","configs":{"bucket":"kusion-test","endpoint":"oss-cn-shanghai.aliyuncs.com"}}'
+kusion config set backends.current oss_test
+```
+
+### Develop Customized Kusion Modules
+
+In the production practice of an enterprise, a common scenario is that PEs need to abstract and encapsulate the on-premises infrastructural computing, storage and networking resources to reduce the cognitive burden on developers. To achieve this goal, they can develop customized Kusion modules, a kind of reusable building block. Below is an example [GitHub Actions workflow](https://github.com/KusionStack/catalog/actions/runs/9398478367/job/25883893076) for pushing the official KusionStack module artifacts for multiple OS/arch combinations to [GitHub Packages](https://github.com/features/packages).
+
+![upload kusion modules through github actions](/img/docs/user_docs/guides/github-actions/upload_modules.png)
+
+### Create and Update Workspace
+
+Moreover, PEs also need to create and update the workspace configurations, where they can declare the Kusion modules available in the workspace and add standardized default or application-specific configurations across the entire scope of the workspace.
+
+Suppose the PEs have set up the remote storage backend; they can use the following commands to create and update a workspace.
+
+```shell
+# create workspace with the name of 'dev'
+kusion workspace create dev
+
+# update workspace with 'dev.yaml'
+kusion workspace update dev -f dev.yaml
+
+# switch to the 'dev' workspace
+kusion workspace switch dev
+```
+
+```yaml
+# dev.yaml declares 'mysql' and 'network' modules in the workspace
+modules:
+  mysql:
+    path: oci://ghcr.io/kusionstack/mysql
+    version: 0.2.0
+  network:
+    path: oci://ghcr.io/kusionstack/network
+    version: 0.2.0
+```
+
+So far, the PEs have almost completed the fundamental work of setting up the IDP.
+
+## Perspective of Dev
+
+### Setup Kusion Storage Backend
+
+In order to get the available modules of the workspace and validate the generated [spec](../../3-concepts/6-spec.md), developers need to communicate with the PEs to obtain the AK/SK (usually with **Read-Only** permission), the bucket name, and the endpoint to access the remote storage backend. Similar to the PEs, developers can set up the backend configuration with the following commands.
+
+```shell
+# please replace the env with actual AK/SK
+export OSS_ACCESS_KEY_ID=LTAxxxxxxxxxxxxxx
+export OSS_ACCESS_KEY_SECRET=uUPxxxxxxxxxx
+
+# set up backend
+kusion config set backends.oss_test '{"type":"oss","configs":{"bucket":"kusion-test","endpoint":"oss-cn-shanghai.aliyuncs.com"}}'
+kusion config set backends.current oss_test
+```
+
+### Create and Update Project and Stack
+
+Next, developers can create and update the [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md) configurations with the `kusion project` and `kusion stack` commands.
+
+```shell
+# create a new project named quickstart
+mkdir quickstart && cd quickstart
+kusion project create
+
+# create a stack named dev
+kusion stack create dev
+```
+
+Below are the initialized project and stack contents.
+
+```yaml
+# quickstart/project.yaml
+name: quickstart
+```
+
+```yaml
+# quickstart/dev/stack.yaml
+# The metadata information of the stack.
+name: dev
+```
+
+```python
+# kcl.mod
+# Please add the modules you need in 'dependencies'.
+[dependencies]
+kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" }
+service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" }
+```
+
+```python
+# main.k
+# The configuration code from the perspective of developers.
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+# Please replace the ${APPLICATION_NAME} with the name of your application, and complete the
+# 'AppConfiguration' instance with your own workload and accessories.
+${APPLICATION_NAME}: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+
+        }
+    }
+    accessories: {
+
+    }
+}
+```
+
+Developers can use `kusion mod list` to get the available modules in the current workspace and use `kusion mod add` to add a specified module to the current stack.
+
+```shell
+# list the available modules in the current workspace
+➜ kusion mod list
+Name     Version  URL
+mysql    0.2.0    oci://ghcr.io/kusionstack/mysql
+network  0.2.0    oci://ghcr.io/kusionstack/network
+```
+
+```shell
+# add the specified modules to the current stack
+kusion mod add mysql && kusion mod add network
+```
+
+The corresponding module artifacts will be downloaded and the declarations of the modules will be added to `kcl.mod`, which is analogous to how `go mod tidy` updates `go.mod`.
+
+```python
+# kcl.mod after executing 'kusion mod add'
+[package]
+
+[dependencies]
+kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" }
+service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" }
+mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" }
+network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" }
+```
+
+After this, developers can edit the application configuration code according to their actual needs.
+
+### Trigger Preview and Apply Pipeline
+
+[KusionStack/konfig](https://github.com/KusionStack/konfig) is the official example repository and provides a pair of GitHub Actions workflows, [preview.yml](https://github.com/KusionStack/konfig/blob/main/.github/workflows/preview.yml) and [apply.yml](https://github.com/KusionStack/konfig/blob/main/.github/workflows/apply.yml). The `preview.yml` workflow is triggered by a pull request to the main branch, while `apply.yml` is triggered by a push to the main branch.
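+
+At their core, these workflows run the same Kusion commands a developer would run locally. The sketch below shows roughly what each job boils down to; the flags are taken from the Kusion CLI reference, while the working directory and the artifact file name are only examples and may differ from the actual `preview.yml` and `apply.yml`:
+
+```shell
+# A sketch of the core commands such a pipeline might run, not the exact
+# contents of preview.yml / apply.yml.
+
+# Preview job (triggered by a pull request to main); the result can be
+# uploaded as a workflow artifact for approvers to review.
+kusion preview -w quickstart/dev --no-style=true -o json > preview-result.json
+
+# Apply job (triggered by a push to main after the merge); --yes skips the
+# interactive approval since the pipeline is non-interactive.
+kusion apply -w quickstart/dev --yes --no-style=true
+```
+
+The screenshots below show these workflows in action.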
+
+![preview workflow](/img/docs/user_docs/guides/github-actions/github_actions_preview.png)
+
+![apply workflow](/img/docs/user_docs/guides/github-actions/github_actions_apply.png)
+
+The preview workflow will first identify the changed projects and stacks.
+
+![get changed projects and stacks](/img/docs/user_docs/guides/github-actions/github_actions_get_changed_projects_stacks.png)
+
+Then the preview workflow will execute the `kusion preview` command for all of the changed stacks, and open an issue for manual approval to merge the changes after the approvers check the preview result artifact.
+
+![preview workflow details](/img/docs/user_docs/guides/github-actions/github_actions_preview_details.png)
+
+![manual approval](/img/docs/user_docs/guides/github-actions/github_actions_mannual_approval.png)
+
+Once the code review is completed and the pull request is merged into the main branch, the apply workflow is triggered, which will deploy the changes to the affected Projects and Stacks and upload the respective results as GitHub Actions artifacts.
+
+![apply workflow details](/img/docs/user_docs/guides/github-actions/github_actions_apply_details.png)
\ No newline at end of file
diff --git a/docs/kusion/5-user-guides/5-production-practice-case/_category_.json b/docs/kusion/5-user-guides/5-production-practice-case/_category_.json
new file mode 100644
index 00000000..2b76a644
--- /dev/null
+++ b/docs/kusion/5-user-guides/5-production-practice-case/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Production Practice Case"
+}
diff --git a/docs/kusion/5-user-guides/6-llm-ops/1-inference.md b/docs/kusion/5-user-guides/6-llm-ops/1-inference.md
new file mode 100644
index 00000000..a2851189
--- /dev/null
+++ b/docs/kusion/5-user-guides/6-llm-ops/1-inference.md
@@ -0,0 +1,409 @@
+---
+id: inference
+---
+
+# Provide LLM Service with Inference Module for AI Application
+
+In the wave of Artificial Intelligence (AI), Large Language Models (LLMs) are gradually becoming a key factor in driving innovation and productivity. As a result, researchers and developers are looking for a more efficient way to deploy and manage complex LLMs and AI applications.
+
+To simplify the process of model construction, deployment, and interaction with applications, the KusionStack community provides an `inference` module. In this article, we will explore in detail how to deploy an AI application using the LLM service provided by this module.
+
+:::info
+The module definition and implementation, as well as the example application we are about to show, can be found [here](https://github.com/KusionStack/catalog/tree/main/modules/inference).
+:::
+
+## Prerequisites
+
+Before we begin, we need to perform the following steps to set up the environment required by Kusion:
+
+- Install Kusion
+- Prepare a running Kubernetes cluster
+
+For more details, please refer to the [prerequisites](https://www.kusionstack.io/docs/user-guides/working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application with Kusion.
+
+## Initializing and Managing Workspace Configuration
+
+For information on how to initialize and switch a workspace with `kusion workspace create` and `kusion workspace switch`, please refer to [this document](https://www.kusionstack.io/docs/user-guides/working-with-k8s/deploy-application#initializing-workspace-configuration).
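+
+As a quick reference, the basic commands look like the sketch below; the workspace name `llm-dev` and the file name `workspace.yaml` are only examples for this guide:
+
+```shell
+# Create a workspace for this guide, load its configuration, and switch to it.
+# An example of the workspace configuration file is shown later in this section.
+kusion workspace create llm-dev
+kusion workspace update llm-dev -f workspace.yaml
+kusion workspace switch llm-dev
+```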
+
+For the current version of the `inference` module, an empty configuration is enough for workspace initialization. Users may also need to configure the `network` module as an accessory to provide network access for the AI application, whose workload is described with the `service` module. Users can also add other modules' platform configurations in the workspace according to their needs.
+
+An example is shown below:
+
+```yaml
+modules:
+  service:
+    path: oci://ghcr.io/kusionstack/service
+    version: 0.2.0
+    configs:
+      default: {}
+  network:
+    path: oci://ghcr.io/kusionstack/network
+    version: 0.2.0
+    configs:
+      default: {}
+  inference:
+    path: oci://ghcr.io/kusionstack/inference
+    version: 0.1.0-beta.4
+    configs:
+      default: {}
+```
+
+## Example
+
+After creating and switching to the workspace shown above, we can initialize the example `Project` and `Stack` with `kusion project create` and `kusion stack create`. Please refer to [this document](https://www.kusionstack.io/docs/user-guides/working-with-k8s/deploy-application#initializing-application-configuration) for more details.
+
+The directory structure and configuration file contents of the example project are shown below:
+
+```shell
+example/
+.
+├── default
+│   ├── kcl.mod
+│   ├── main.k
+│   └── stack.yaml
+└── project.yaml
+```
+
+`project.yaml`:
+
+```yaml
+name: example
+```
+
+`stack.yaml`:
+
+```yaml
+name: default
+```
+
+`kcl.mod`:
+
+```yaml
+[dependencies]
+kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" }
+service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" }
+network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" }
+inference = { oci = "oci://ghcr.io/kusionstack/inference", tag = "0.1.0-beta.4" }
+```
+
+`main.k`:
+
+```python
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import network as n
+import inference.v1.inference
+
+inference: ac.AppConfiguration {
+    # Declare the workload configurations.
+    workload: service.Service {
+        containers: {
+            myct: c.Container {image: "kangy126/app"}
+        }
+        replicas: 1
+    }
+    # Declare the inference module configurations.
+    accessories: {
+        "inference": inference.Inference {
+            model: "llama3"
+            framework: "Ollama"
+        }
+        "network": n.Network {ports: [n.Port {
+            port: 80
+            targetPort: 5000
+        }]}
+    }
+}
+```
+
+In the above example, we configure the `model` and `framework` items of the `inference` module, which are its two required configuration items. By changing these two items, an inference service for a different model or inference framework can be quickly built.
+
+As for how the AI application uses the LLM service provided by the `inference` module: the module injects an environment variable named `INFERENCE_URL`, and the application can call the LLM service at that address.
+
+Which model is used is transparent to the application; you only need to provide the `prompt` parameter in the request. Of course, you can directly modify the model and other configuration items in the `main.k` file and update the deployed resources with `kusion apply`.
+
+There are also some optional configuration items in the `inference` module for adjusting the LLM service, whose details can be found [here](../../6-reference/2-modules/1-developer-schemas/inference/inference.md).
+
+## Deployment
+
+Now we can use Kusion to generate and deploy the `Spec` containing all the resources the AI application needs.
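+
+At a glance, the whole deployment boils down to the following commands, each of which is walked through in detail below:
+
+```shell
+# Overview of the deployment steps detailed below.
+cd example/default   # enter the stack directory
+kusion generate      # render the Spec from the configuration code
+kusion preview       # review the planned resource changes
+kusion apply         # deploy the resources to the cluster
+```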
+ +First, we should navigate to the folder `example/default` and execute the `kusion generate` command, and a `Spec` will be generated. + +``` +➜ default git:(main) ✗ kusion generate + ✔︎ Generating Spec in the Stack default... +resources: + - id: v1:Namespace:example + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: example + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:example:example-default-inference + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + name: example-default-inference + namespace: example + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + spec: + containers: + - env: + - name: INFERENCE_URL + value: ollama-infer-service + image: kangy126/app + name: myct + resources: {} + status: {} + dependsOn: + - v1:Namespace:example + - v1:Service:example:ollama-infer-service + - v1:Service:example:example-default-inference-private + extensions: + GVK: apps/v1, Kind=Deployment + kusion.io/is-workload: true + - id: apps/v1:Deployment:example:ollama-infer-deployment + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + name: ollama-infer-deployment + namespace: example + spec: + selector: + matchLabels: + accessory: ollama + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + accessory: ollama + spec: + containers: + - command: + - /bin/sh + - -c + - |- + echo 'FROM llama3 + PARAMETER top_k 40 + PARAMETER top_p 0.900000 + PARAMETER temperature 0.800000 + PARAMETER num_predict 128 + PARAMETER num_ctx 2048 + ' > Modelfile && ollama serve & OLLAMA_SERVE_PID=$! 
&& sleep 5 && ollama create llama3 -f Modelfile && wait $OLLAMA_SERVE_PID + image: ollama/ollama + name: ollama-infer-container + ports: + - containerPort: 11434 + name: ollama-port + resources: {} + volumeMounts: + - mountPath: /root/.ollama + name: ollama-infer-storage + volumes: + - emptyDir: {} + name: ollama-infer-storage + status: {} + dependsOn: + - v1:Namespace:example + - v1:Service:example:ollama-infer-service + - v1:Service:example:example-default-inference-private + extensions: + GVK: apps/v1, Kind=Deployment + - id: v1:Service:example:ollama-infer-service + type: Kubernetes + attributes: + apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + accessory: ollama + name: ollama-infer-service + namespace: example + spec: + ports: + - port: 80 + targetPort: 11434 + selector: + accessory: ollama + type: ClusterIP + status: + loadBalancer: {} + dependsOn: + - v1:Namespace:example + extensions: + GVK: /v1, Kind=Service + - id: v1:Service:example:example-default-inference-private + type: Kubernetes + attributes: + apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + name: example-default-inference-private + namespace: example + spec: + ports: + - name: example-default-inference-private-80-tcp + port: 80 + protocol: TCP + targetPort: 5000 + selector: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + type: ClusterIP + status: + loadBalancer: {} + dependsOn: + - v1:Namespace:example + extensions: + GVK: /v1, Kind=Service +secretStore: null +context: {} +``` + +Next, we can execute the `kusion preview` command and review the resource three-way diffs for a more secure deployment. + +``` +➜ default git:(main) ✗ kusion preview + ✔︎ Generating Spec in the Stack default... +Stack: default +ID Action +v1:Namespace:example Create +v1:Service:example:ollama-infer-service Create +v1:Service:example:example-default-inference-private Create +apps/v1:Deployment:example:example-default-inference Create +apps/v1:Deployment:example:ollama-infer-deployment Create + + +Which diff detail do you want to see?: +> all + v1:Namespace:example Create + v1:Service:example:ollama-infer-service Create + v1:Service:example:example-default-inference-private Create + apps/v1:Deployment:example:example-default-inference Create +``` + +Finally, execute the `kusion apply` command to deploy the related Kubernetes resources. + +``` +➜ default git:(main) ✗ kusion apply + ✔︎ Generating Spec in the Stack default... +Stack: default +ID Action +v1:Namespace:example Create +v1:Service:example:ollama-infer-service Create +v1:Service:example:example-default-inference-private Create +apps/v1:Deployment:example:ollama-infer-deployment Create +apps/v1:Deployment:example:example-default-inference Create + + +Do you want to apply these diffs?: + > yes + +Start applying diffs ... + ✔︎ Succeeded v1:Namespace:example + ✔︎ Succeeded v1:Service:example:ollama-infer-service + ✔︎ Succeeded v1:Service:example:example-default-inference-private + ✔︎ Succeeded apps/v1:Deployment:example:ollama-infer-deployment + ✔︎ Succeeded apps/v1:Deployment:example:example-default-inference +Apply complete! Resources: 5 created, 0 updated, 0 deleted. + +``` + +## Testing + +Execute the `kubectl get all -n example` command, and the deployed Kubernetes resources will be shown. 
+
+```
+➜ ~ kubectl get all -n example
+NAME                                           READY   STATUS    RESTARTS   AGE
+pod/example-dev-inference-5cf6c74574-7w92f     1/1     Running   0          2d6h
+pod/mynginx                                    1/1     Running   0          2d6h
+pod/ollama-infer-deployment-7c56845496-s5snb   1/1     Running   0          2d6h
+
+NAME                                   TYPE        CLUSTER-IP        EXTERNAL-IP   PORT(S)        AGE
+service/example-dev-inference-public   ClusterIP   192.168.116.121                 80:32693/TCP   2d6h
+service/ollama-infer-service           ClusterIP   192.168.28.208                  80/TCP         2d6h
+
+NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
+deployment.apps/example-dev-inference     1/1     1            1           2d6h
+deployment.apps/ollama-infer-deployment   1/1     1            1           2d6h
+
+NAME                                                 DESIRED   CURRENT   READY   AGE
+replicaset.apps/example-dev-inference-5cf6c74574     1         1         1       2d6h
+replicaset.apps/ollama-infer-deployment-7c56845496   1         1         1       2d6h
+```
+
+The AI application in the example provides a simple service that returns the LLM's response when it receives a GET request with the `prompt` parameter.
+
+We can test the application service locally with `port-forward`, which allows us to send requests directly to the application via the browser.
+
+```sh
+kubectl port-forward service/example-dev-inference-public 8080:80 -n example
+```
+
+The test results are shown in the figure below.
+
+![](/img/docs/user_docs/guides/llm-ops/inference-test-1.png)
+
+By modifying the `model` parameter in the `main.k` file, you can switch to a different model without having to change the application itself.
+
+For example, we change the value of `model` from `llama3` to `qwen`. Then we execute the `kusion apply` command to update the Kubernetes resources.
+
+```sh
+❯ kusion apply
+ ✔︎ Generating Spec in the Stack dev...
+Stack: dev
+ID                                                    Action
+v1:Namespace:example                                  UnChanged
+v1:Service:example:ollama-infer-service               UnChanged
+v1:Service:example:proxy-infer-service                UnChanged
+v1:Service:example:example-dev-inference-public       UnChanged
+apps/v1:Deployment:example:example-dev-inference      UnChanged
+apps/v1:Deployment:example:proxy-infer-deployment     Update
+apps/v1:Deployment:example:ollama-infer-deployment    Update
+
+
+Do you want to apply these diffs?:
+  yes
+> details
+  no
+```
+
+We send the request to the application via the browser again, and the new results are as follows.
+
+![](/img/docs/user_docs/guides/llm-ops/inference-test-2.png)
diff --git a/docs/kusion/5-user-guides/6-llm-ops/_category_.json b/docs/kusion/5-user-guides/6-llm-ops/_category_.json
new file mode 100644
index 00000000..d0ed9947
--- /dev/null
+++ b/docs/kusion/5-user-guides/6-llm-ops/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "LLM Ops"
+}
diff --git a/docs/kusion/5-user-guides/_category_.json b/docs/kusion/5-user-guides/_category_.json
new file mode 100644
index 00000000..abf4c874
--- /dev/null
+++ b/docs/kusion/5-user-guides/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "User Guides"
+}
diff --git a/docs/kusion/6-reference/1-commands/_category_.json b/docs/kusion/6-reference/1-commands/_category_.json
new file mode 100644
index 00000000..d783ca2e
--- /dev/null
+++ b/docs/kusion/6-reference/1-commands/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Kusion Commands"
+}
diff --git a/docs/kusion/6-reference/1-commands/index.md b/docs/kusion/6-reference/1-commands/index.md
new file mode 100644
index 00000000..dd782a3e
--- /dev/null
+++ b/docs/kusion/6-reference/1-commands/index.md
@@ -0,0 +1,40 @@
+# Kusion Commands
+
+Kusion is the Platform Orchestrator of Internal Developer Platform
+
+Find more information at: https://www.kusionstack.io
+
+### Synopsis
+
+As a Platform Orchestrator, Kusion delivers user intentions to Kubernetes, Clouds and On-Premise resources.
Also enables asynchronous cooperation between the development and the platform team and drives the separation of concerns. + +``` +kusion [flags] +``` + +### Options + +``` + -h, --help help for kusion + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion apply](kusion-apply.md) - Apply the operational intent of various resources to multiple runtimes +* [kusion config](kusion-config.md) - Interact with the Kusion config +* [kusion destroy](kusion-destroy.md) - Destroy resources within the stack. +* [kusion generate](kusion-generate.md) - Generate and print the resulting Spec resources of target Stack +* [kusion init](kusion-init.md) - Initialize the scaffolding for a demo project +* [kusion mod](kusion-mod.md) - Manage Kusion modules +* [kusion options](kusion-options.md) - Print the list of flags inherited by all commands +* [kusion preview](kusion-preview.md) - Preview a series of resource changes within the stack +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository +* [kusion release](kusion-release.md) - Manage Kusion release files +* [kusion resource](kusion-resource.md) - Observe Kusion resource information +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory +* [kusion version](kusion-version.md) - Print the Kusion version information for the current context +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-apply.md b/docs/kusion/6-reference/1-commands/kusion-apply.md new file mode 100644 index 00000000..032fe3dc --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-apply.md @@ -0,0 +1,77 @@ +# kusion apply + +Apply the operational intent of various resources to multiple runtimes + +### Synopsis + +Apply a series of resource changes within the stack. + + Create, update or delete resources according to the operational intent within a stack. By default, Kusion will generate an execution preview and prompt for your approval before performing any actions. You can review the preview details and make a decision to proceed with the actions or abort them. + +``` +kusion apply [flags] +``` + +### Examples + +``` + # Apply with specified work directory + kusion apply -w /path/to/workdir + + # Apply with specified arguments + kusion apply -D name=test -D age=18 + + # Apply with specifying spec file + kusion apply --spec-file spec.yaml + + # Skip interactive approval of preview details before applying + kusion apply --yes + + # Apply without output style and color + kusion apply --no-style=true + + # Apply without watching the resource changes and waiting for reconciliation + kusion apply --watch=false + + # Apply with the specified timeout duration for kusion apply command, measured in second(s) + kusion apply --timeout=120 + + # Apply with localhost port forwarding + kusion apply --port-forward=8080 +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. 
+ -d, --detail Automatically show preview details with interactive options (default true) + --dry-run Preview the execution effect (always successful) without actually applying the changes + -h, --help help for apply + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string Specify the output format + --port-forward int Forward the specified port from local to service + --spec-file string Specify the spec file path as input, and the spec file must be located in the working directory or its subdirectories + --timeout int The timeout duration for kusion apply command, measured in second(s) + --watch After creating/updating/deleting the requested object, watch for changes (default true) + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-config-get.md b/docs/kusion/6-reference/1-commands/kusion-config-get.md new file mode 100644 index 00000000..4d50b615 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-config-get.md @@ -0,0 +1,37 @@ +# kusion config get + +Get a config item + +### Synopsis + +This command gets the value of a specified kusion config item, where the config item must be registered. + +``` +kusion config get +``` + +### Examples + +``` + # Get a config item + kusion config get backends.current +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-config-list.md b/docs/kusion/6-reference/1-commands/kusion-config-list.md new file mode 100644 index 00000000..95d9620c --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-config-list.md @@ -0,0 +1,37 @@ +# kusion config list + +List all config items + +### Synopsis + +This command lists all the kusion config items and their values. + +``` +kusion config list +``` + +### Examples + +``` + # List config items + kusion config list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-config-set.md b/docs/kusion/6-reference/1-commands/kusion-config-set.md new file mode 100644 index 00000000..cf3d3213 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-config-set.md @@ -0,0 +1,40 @@ +# kusion config set + +Set a config item + +### Synopsis + +This command sets the value of a specified kusion config item, where the config item must be registered, and the value must be in valid type. + +``` +kusion config set +``` + +### Examples + +``` + # Set a config item with string type value + kusion config set backends.current s3-pre + + # Set a config item with struct or map type value + kusion config set backends.s3-pre.configs '{"bucket":"kusion"}' +``` + +### Options + +``` + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-config-unset.md b/docs/kusion/6-reference/1-commands/kusion-config-unset.md new file mode 100644 index 00000000..538668fe --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-config-unset.md @@ -0,0 +1,37 @@ +# kusion config unset + +Unset a config item + +### Synopsis + +This command unsets a specified kusion config item, where the config item must be registered. + +``` +kusion config unset +``` + +### Examples + +``` + # Unset a config item + kusion config unset backends.s3-pre.configs.bucket +``` + +### Options + +``` + -h, --help help for unset +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-config.md b/docs/kusion/6-reference/1-commands/kusion-config.md new file mode 100644 index 00000000..e8264ee8 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-config.md @@ -0,0 +1,34 @@ +# kusion config + +Interact with the Kusion config + +### Synopsis + +Config contains the operation of Kusion configurations. + +``` +kusion config [flags] +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion config get](kusion-config-get.md) - Get a config item +* [kusion config list](kusion-config-list.md) - List all config items +* [kusion config set](kusion-config-set.md) - Set a config item +* [kusion config unset](kusion-config-unset.md) - Unset a config item + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-destroy.md b/docs/kusion/6-reference/1-commands/kusion-destroy.md new file mode 100644 index 00000000..6881a3ce --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-destroy.md @@ -0,0 +1,47 @@ +# kusion destroy + +Destroy resources within the stack. + +### Synopsis + +Destroy resources within the stack. + + Please note that the destroy command does NOT perform resource version checks. Therefore, if someone submits an update to a resource at the same time you execute a destroy command, their update will be lost along with the rest of the resource. + +``` +kusion destroy [flags] +``` + +### Examples + +``` + # Delete resources of current stack + kusion destroy +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details after previewing it + -h, --help help for destroy + --no-style no-style sets to RawOutput mode and disables all of styling + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-generate.md b/docs/kusion/6-reference/1-commands/kusion-generate.md new file mode 100644 index 00000000..3514b3ee --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-generate.md @@ -0,0 +1,53 @@ +# kusion generate + +Generate and print the resulting Spec resources of target Stack + +### Synopsis + +This command generates Spec resources with given values, then write the resulting Spec resources to specific output file or stdout. + + The nearest parent folder containing a stack.yaml file is loaded from the project in the current directory. + +``` +kusion generate [flags] +``` + +### Examples + +``` + # Generate and write Spec resources to specific output file + kusion generate -o /tmp/spec.yaml + + # Generate spec with custom workspace + kusion generate -o /tmp/spec.yaml --workspace dev + + # Generate spec with specified arguments + kusion generate -D name=test -D age=18 +``` + +### Options + +``` + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. 
+ -h, --help help for generate + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string File to write generated Spec resources to + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-init.md b/docs/kusion/6-reference/1-commands/kusion-init.md new file mode 100644 index 00000000..2d24082f --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-init.md @@ -0,0 +1,46 @@ +# kusion init + +Initialize the scaffolding for a demo project + +### Synopsis + +This command initializes the scaffolding for a demo project with the name of the current directory to help users quickly get started. + + Note that target directory needs to be an empty directory. + +``` +kusion init [flags] +``` + +### Examples + +``` + # Initialize a demo project with the name of the current directory + mkdir quickstart && cd quickstart + kusion init + + # Initialize the demo project in a different target directory + kusion init --target projects/my-demo-project +``` + +### Options + +``` + -h, --help help for init + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-mod-add.md b/docs/kusion/6-reference/1-commands/kusion-mod-add.md new file mode 100644 index 00000000..a0530340 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-mod-add.md @@ -0,0 +1,39 @@ +# kusion mod add + +Add a module from a workspace + +``` +kusion mod add MODULE_NAME [--workspace WORKSPACE] [flags] +``` + +### Examples + +``` + # Add a kusion module to the kcl.mod from the current workspace to use it in AppConfiguration + kusion mod add my-module + + # Add a module to the kcl.mod from a specified workspace to use it in AppConfiguration + kusion mod add my-module --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for add + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-mod-init.md b/docs/kusion/6-reference/1-commands/kusion-mod-init.md new file mode 100644 index 00000000..c94b9653 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-mod-init.md @@ -0,0 +1,40 @@ +# kusion mod init + +Create a kusion module along with common files and directories in the current directory + +``` +kusion mod init MODULE_NAME PATH [flags] +``` + +### Examples + +``` + # Create a kusion module template in the current directory + kusion mod init my-module + + # Init a kusion module at the specified Path + kusion mod init my-module ./modules + + # Init a module from a remote git template repository + kusion mod init my-module --template https://github.com// +``` + +### Options + +``` + -h, --help help for init + --template string Initialize with specified template +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-mod-list.md b/docs/kusion/6-reference/1-commands/kusion-mod-list.md new file mode 100644 index 00000000..0ea4e426 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-mod-list.md @@ -0,0 +1,39 @@ +# kusion mod list + +List kusion modules in a workspace + +``` +kusion mod list [--workspace WORKSPACE] [flags] +``` + +### Examples + +``` + # List kusion modules in the current workspace + kusion mod list + + # List modules in a specified workspace + kusion mod list --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for list + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-mod-push.md b/docs/kusion/6-reference/1-commands/kusion-mod-push.md new file mode 100644 index 00000000..2f0d0a72 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-mod-push.md @@ -0,0 +1,62 @@ +# kusion mod push + +Push a module to OCI registry + +### Synopsis + +The push command packages the module as an OCI artifact and pushes it to the OCI registry using the version as the image tag. 
+ +``` +kusion mod push MODULE_PATH OCI_REPOSITORY_URL [--creds CREDENTIALS] +``` + +### Examples + +``` + # Push a module of current OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --creds + + # Push a module of specific OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --os-arch=darwin/arm64 --creds + + # Push a module to an OCI Registry using a credentials in : format. + kusion mod push /path/to/my-module oci://ghcr.io/org --creds : + + # Push a release candidate without marking it as the latest stable + kusion mod push /path/to/my-module oci://ghcr.io/org --latest=false + + # Push a module with custom OCI annotations + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --annotation='org.opencontainers.image.documentation=https://app.org/docs' + + # Push and sign a module with Cosign (the cosign binary must be present in PATH) + export COSIGN_PASSWORD=password + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --sign=cosign --cosign-key=/path/to/cosign.key +``` + +### Options + +``` + -a, --annotations strings Set custom OCI annotations in '=' format. + --cosign-key string The Cosign private key for signing the module. + --creds string The credentials token for the OCI registry in or : format. + -h, --help help for push + --insecure-registry If true, allows connecting to a OCI registry without TLS or with self-signed certificates. + --latest Tags the current version as the latest stable module version. (default true) + --os-arch string The os arch of the module e.g. 'darwin/arm64', 'linux/amd64'. + --sign string Signs the module with the specified provider. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-mod.md b/docs/kusion/6-reference/1-commands/kusion-mod.md new file mode 100644 index 00000000..4ba93969 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-mod.md @@ -0,0 +1,36 @@ +# kusion mod + +Manage Kusion modules + +### Synopsis + +Commands for managing Kusion modules. + + These commands help you manage the lifecycle of Kusion modules. + +``` +kusion mod +``` + +### Options + +``` + -h, --help help for mod +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion mod add](kusion-mod-add.md) - Add a module from a workspace +* [kusion mod init](kusion-mod-init.md) - Create a kusion module along with common files and directories in the current directory +* [kusion mod list](kusion-mod-list.md) - List kusion modules in a workspace +* [kusion mod push](kusion-mod-push.md) - Push a module to OCI registry + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-options.md b/docs/kusion/6-reference/1-commands/kusion-options.md new file mode 100644 index 00000000..d7fbc13d --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-options.md @@ -0,0 +1,37 @@ +# kusion options + +Print the list of flags inherited by all commands + +### Synopsis + +Print the list of flags inherited by all commands + +``` +kusion options [flags] +``` + +### Examples + +``` + # Print flags inherited by all commands + kubectl options +``` + +### Options + +``` + -h, --help help for options +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-preview.md b/docs/kusion/6-reference/1-commands/kusion-preview.md new file mode 100644 index 00000000..61103f19 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-preview.md @@ -0,0 +1,64 @@ +# kusion preview + +Preview a series of resource changes within the stack + +### Synopsis + +Preview a series of resource changes within the stack. + + Create, update or delete resources according to the intent described in the stack. By default, Kusion will generate an execution preview and present it for your approval before taking any action. + +``` +kusion preview [flags] +``` + +### Examples + +``` + # Preview with specified work directory + kusion preview -w /path/to/workdir + + # Preview with specified arguments + kusion preview -D name=test -D age=18 + + # Preview with specifying spec file + kusion preview --spec-file spec.yaml + + # Preview with ignored fields + kusion preview --ignore-fields="metadata.generation,metadata.managedFields" + + # Preview with json format result + kusion preview -o json + + # Preview without output style and color + kusion preview --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. 
+ -d, --detail Automatically show preview details with interactive options (default true) + -h, --help help for preview + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string Specify the output format + --spec-file string Specify the spec file path as input, and the spec file must be located in the working directory or its subdirectories + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-project-create.md b/docs/kusion/6-reference/1-commands/kusion-project-create.md new file mode 100644 index 00000000..5f914717 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-project-create.md @@ -0,0 +1,44 @@ +# kusion project create + +Create a new project + +### Synopsis + +This command creates a new project.yaml file under the target directory which by default is the current working directory. + + Note that the target directory needs to be an empty directory. + +``` +kusion project create +``` + +### Examples + +``` + # Create a new project with the name of the current working directory + mkdir my-project && cd my-project + kusion project create + + # Create a new project in a specified target directory + kusion project create --target /dir/to/projects/my-project +``` + +### Options + +``` + -h, --help help for create + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-project-list.md b/docs/kusion/6-reference/1-commands/kusion-project-list.md new file mode 100644 index 00000000..c42f05e4 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-project-list.md @@ -0,0 +1,51 @@ +# kusion project list + +List the applied projects + +### Synopsis + +This command lists all the applied projects in the target backend and target workspace. + + By default list the projects in the current backend and current workspace. 
+ +``` +kusion project list +``` + +### Examples + +``` + # List the applied project in the current backend and current workspace + kusion project list + + # List the applied project in a specified backend and current workspace + kusion project list --backend default + + # List the applied project in a specified backend and specified workspaces + kusion project list --backend default --workspace dev,default + + # List the applied project in a specified backend and all the workspaces + kusion project list --backend default --all +``` + +### Options + +``` + -a, --all List all the projects in all the workspaces + --backend string The backend to use, supports 'local', 'oss' and 's3' + -h, --help help for list + --workspace strings The name of the target workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-project.md b/docs/kusion/6-reference/1-commands/kusion-project.md new file mode 100644 index 00000000..6866aedf --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-project.md @@ -0,0 +1,34 @@ +# kusion project + +Project is a folder that contains a project.yaml file and is linked to a Git repository + +### Synopsis + +Project in Kusion is defined as any folder that contains a project.yaml file and is linked to a Git repository. + + Project organizes logical configurations for internal components to orchestrate the application and assembles them to suit different roles, such as developers and platform engineers. + +``` +kusion project [flags] +``` + +### Options + +``` + -h, --help help for project +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion project create](kusion-project-create.md) - Create a new project +* [kusion project list](kusion-project-list.md) - List the applied projects + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-release-list.md b/docs/kusion/6-reference/1-commands/kusion-release-list.md new file mode 100644 index 00000000..36aafa28 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-release-list.md @@ -0,0 +1,45 @@ +# kusion release list + +List all releases of the current stack + +### Synopsis + +List all releases of the current stack. + + This command displays information about all releases of the current stack in the current or a specified workspace, including their revision, phase, and creation time. + +``` +kusion release list [flags] +``` + +### Examples + +``` + # List all releases of the current stack in current workspace + kusion release list + + # List all releases of the current stack in a specified workspace + kusion release list --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. 
+ -h, --help help for list + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion release](kusion-release.md) - Manage Kusion release files + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-release-show.md b/docs/kusion/6-reference/1-commands/kusion-release-show.md new file mode 100644 index 00000000..b5773b68 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-release-show.md @@ -0,0 +1,56 @@ +# kusion release show + +Show details of a release of the current or specified stack + +### Synopsis + +Show details of a release of the current or specified stack. + + This command displays detailed information about a release of the current project in the current or a specified workspace + +``` +kusion release show [flags] +``` + +### Examples + +``` + # Show details of the latest release of the current project in the current workspace + kusion release show + + # Show details of a specific release of the current project in the current workspace + kusion release show --revision=1 + + # Show details of a specific release of the specified project in the specified workspace + kusion release show --revision=1 --project=hoangndst --workspace=dev + + # Show details of the latest release with specified backend + kusion release show --backend=local + + # Show details of the latest release with specified output format + kusion release show --output=json +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3' + -h, --help help for show + -o, --output string Specify the output format + --project string The project name + --revision uint The revision number of the release + --workspace string The workspace name +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion release](kusion-release.md) - Manage Kusion release files + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-release-unlock.md b/docs/kusion/6-reference/1-commands/kusion-release-unlock.md new file mode 100644 index 00000000..471fbafc --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-release-unlock.md @@ -0,0 +1,47 @@ +# kusion release unlock + +Unlock the latest release file of the current stack + +### Synopsis + +Unlock the latest release file of the current stack. + + The phase of the latest release file of the current stack in the current or a specified workspace will be set to 'failed' if it was in the stages of 'generating', 'previewing', 'applying' or 'destroying'. + + Please note that using the 'kusion release unlock' command may cause unexpected concurrent read-write issues with release files, so please use it with caution. + +``` +kusion release unlock [flags] +``` + +### Examples + +``` + # Unlock the latest release file of the current stack in the current workspace. 
+ kusion release unlock + + # Unlock the latest release file of the current stack in a specified workspace. + kusion release unlock --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for unlock + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion release](kusion-release.md) - Manage Kusion release files + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-release.md b/docs/kusion/6-reference/1-commands/kusion-release.md new file mode 100644 index 00000000..47557d34 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-release.md @@ -0,0 +1,35 @@ +# kusion release + +Manage Kusion release files + +### Synopsis + +Commands for managing Kusion release files. + + These commands help you manage the lifecycle of Kusion release files. + +``` +kusion release +``` + +### Options + +``` + -h, --help help for release +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion release list](kusion-release-list.md) - List all releases of the current stack +* [kusion release show](kusion-release-show.md) - Show details of a release of the current or specified stack +* [kusion release unlock](kusion-release-unlock.md) - Unlock the latest release file of the current stack + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-resource-graph.md b/docs/kusion/6-reference/1-commands/kusion-resource-graph.md new file mode 100644 index 00000000..d222e3c9 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-resource-graph.md @@ -0,0 +1,54 @@ +# kusion resource graph + +Display a graph of all the resources' information of the target project and target workspaces + +### Synopsis + +Display information of all the resources of a project. + + This command displays information of all the resources of a project in the current or specified workspaces. + +``` +kusion resource graph [flags] +``` + +### Examples + +``` + # Display information of all the resources of a project in the current workspace. + kusion resource graph --project quickstart + + # Display information of all the resources of a project in specified workspaces. + kusion resource graph --project quickstart --workspace=dev,default + + # Display information of all the resource of a project in all the workspaces that has been deployed. + kusion resource graph --project quickstart --all + kusion resource graph --project quickstart -a + + # Display information of all the resource of a project with in specified workspaces with json format result. 
+ kusion resource graph --project quickstart --workspace dev -o json +``` + +### Options + +``` + -a, --all Display all the resources of all the workspaces + --backend string The backend to use, supports 'local', 'oss' and 's3' + -h, --help help for graph + -o, --output string Specify the output format, json only + --project string The name of the target project + --workspace strings The name of the target workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion resource](kusion-resource.md) - Observe Kusion resource information + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-resource.md b/docs/kusion/6-reference/1-commands/kusion-resource.md new file mode 100644 index 00000000..9ac6ba50 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-resource.md @@ -0,0 +1,33 @@ +# kusion resource + +Observe Kusion resource information + +### Synopsis + +Commands for observing Kusion resources. + + These commands help you observe the information of Kusion resources within a project. + +``` +kusion resource +``` + +### Options + +``` + -h, --help help for resource +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion resource graph](kusion-resource-graph.md) - Display a graph of all the resources' information of the target project and target workspaces + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-stack-create.md b/docs/kusion/6-reference/1-commands/kusion-stack-create.md new file mode 100644 index 00000000..ec0d0fef --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-stack-create.md @@ -0,0 +1,49 @@ +# kusion stack create + +Create a new stack + +### Synopsis + +This command creates a new stack under the target directory which by default is the current working directory. + + The stack folder to be created contains 'stack.yaml', 'kcl.mod' and 'main.k' with the specified values. + + Note that the target directory needs to be a valid project directory with project.yaml file + +``` +kusion stack create +``` + +### Examples + +``` + # Create a new stack at current project directory + kusion stack create dev + + # Create a new stack in a specified target project directory + kusion stack create dev --target /dir/to/projects/my-project + + # Create a new stack copied from the referenced stack under the target project directory + kusion stack create prod --copy-from dev +``` + +### Options + +``` + --copy-from string specify the referenced stack path to copy from + -h, --help help for create + -t, --target string specify the target project directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-stack.md b/docs/kusion/6-reference/1-commands/kusion-stack.md new file mode 100644 index 00000000..41fcbef2 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-stack.md @@ -0,0 +1,33 @@ +# kusion stack + +Stack is a folder that contains a stack.yaml file within the corresponding project directory + +### Synopsis + +Stack in Kusion is defined as any folder that contains a stack.yaml file within the corresponding project directory. + + A stack provides a mechanism to isolate multiple deployments of the same application, serving with the target workspace to which an application will be deployed. + +``` +kusion stack [flags] +``` + +### Options + +``` + -h, --help help for stack +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion stack create](kusion-stack-create.md) - Create a new stack + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-version.md b/docs/kusion/6-reference/1-commands/kusion-version.md new file mode 100644 index 00000000..d2011a26 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-version.md @@ -0,0 +1,38 @@ +# kusion version + +Print the Kusion version information for the current context + +### Synopsis + +Print the Kusion version information for the current context + +``` +kusion version [flags] +``` + +### Examples + +``` + # Print the Kusion version + kusion version +``` + +### Options + +``` + -h, --help help for version + -o, --output string Output format. Only json format is supported for now +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-workspace-create.md b/docs/kusion/6-reference/1-commands/kusion-workspace-create.md new file mode 100644 index 00000000..473570e5 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-workspace-create.md @@ -0,0 +1,46 @@ +# kusion workspace create + +Create a new workspace + +### Synopsis + +This command creates a workspace with specified name and configuration file, where the file must be in the YAML format. 
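+As a rough sketch (the keys shown are illustrative assumptions, not the authoritative schema), the configuration file is a plain YAML document that typically declares per-module defaults for the workspace; see the Workspace Configs reference for the real set of keys supported by each module.
+
+```yaml
+# dev.yaml - a hypothetical workspace configuration for the 'dev' workspace
+modules:
+  mysql:
+    default:
+      # illustrative platform-side defaults applied to mysql accessories in this workspace
+      cloud: aws
+```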
+ +``` +kusion workspace create +``` + +### Examples + +``` + # Create a workspace + kusion workspace create dev -f dev.yaml + + # Create a workspace and set as current + kusion workspace create dev -f dev.yaml --current + + # Create a workspace in a specified backend + kusion workspace create prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-workspace-delete.md b/docs/kusion/6-reference/1-commands/kusion-workspace-delete.md new file mode 100644 index 00000000..a465fe4a --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-workspace-delete.md @@ -0,0 +1,44 @@ +# kusion workspace delete + +Delete a workspace + +### Synopsis + +This command deletes the current or a specified workspace. + +``` +kusion workspace delete +``` + +### Examples + +``` + # Delete the current workspace + kusion workspace delete + + # Delete a specified workspace + kusion workspace delete dev + + # Delete a specified workspace in a specified backend + kusion workspace delete prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-workspace-list.md b/docs/kusion/6-reference/1-commands/kusion-workspace-list.md new file mode 100644 index 00000000..97787e16 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-workspace-list.md @@ -0,0 +1,41 @@ +# kusion workspace list + +List all workspace names + +### Synopsis + +This command list the names of all workspaces. + +``` +kusion workspace list +``` + +### Examples + +``` + # List all workspace names + kusion workspace list + + # List all workspace names in a specified backend + kusion workspace list --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-workspace-show.md b/docs/kusion/6-reference/1-commands/kusion-workspace-show.md new file mode 100644 index 00000000..dab0bcbd --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-workspace-show.md @@ -0,0 +1,44 @@ +# kusion workspace show + +Show a workspace configuration + +### Synopsis + +This command gets the current or a specified workspace configuration. + +``` +kusion workspace show +``` + +### Examples + +``` + # Show current workspace configuration + kusion workspace show + + # Show a specified workspace configuration + kusion workspace show dev + + # Show a specified workspace in a specified backend + kusion workspace show prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for show +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-workspace-switch.md b/docs/kusion/6-reference/1-commands/kusion-workspace-switch.md new file mode 100644 index 00000000..111127a4 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-workspace-switch.md @@ -0,0 +1,41 @@ +# kusion workspace switch + +Switch the current workspace + +### Synopsis + +This command switches the workspace, where the workspace must be created. + +``` +kusion workspace switch +``` + +### Examples + +``` + # Switch the current workspace + kusion workspace switch dev + + # Switch the current workspace in a specified backend + kusion workspace switch prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for switch +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-workspace-update.md b/docs/kusion/6-reference/1-commands/kusion-workspace-update.md new file mode 100644 index 00000000..f868cc86 --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-workspace-update.md @@ -0,0 +1,46 @@ +# kusion workspace update + +Update a workspace configuration + +### Synopsis + +This command updates a workspace configuration with specified configuration file, where the file must be in the YAML format. 
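+As a rough sketch (keys are illustrative; the Workspace Configs reference documents the real schema), updating a workspace usually means editing the YAML file that was used at creation time, for example adding or changing a module block, and passing it back with this command; the file is assumed to describe the full desired configuration.
+
+```yaml
+# dev.yaml - hypothetical revision adding a postgres block alongside mysql
+modules:
+  mysql:
+    default:
+      cloud: aws      # illustrative values only
+  postgres:
+    default:
+      cloud: aws
+```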
+ +``` +kusion workspace update +``` + +### Examples + +``` + # Update the current workspace + kusion workspace update -f dev.yaml + + # Update a specified workspace and set as current + kusion workspace update dev -f dev.yaml --current + + # Update a specified workspace in a specified backend + kusion workspace update prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/1-commands/kusion-workspace.md b/docs/kusion/6-reference/1-commands/kusion-workspace.md new file mode 100644 index 00000000..a3eba89d --- /dev/null +++ b/docs/kusion/6-reference/1-commands/kusion-workspace.md @@ -0,0 +1,38 @@ +# kusion workspace + +Workspace is a logical concept representing a target that stacks will be deployed to + +### Synopsis + +Workspace is a logical concept representing a target that stacks will be deployed to. + + Workspace is managed by platform engineers, which contains a set of configurations that application developers do not want or should not concern, and is reused by multiple stacks belonging to different projects. + +``` +kusion workspace [flags] +``` + +### Options + +``` + -h, --help help for workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion workspace create](kusion-workspace-create.md) - Create a new workspace +* [kusion workspace delete](kusion-workspace-delete.md) - Delete a workspace +* [kusion workspace list](kusion-workspace-list.md) - List all workspace names +* [kusion workspace show](kusion-workspace-show.md) - Show a workspace configuration +* [kusion workspace switch](kusion-workspace-switch.md) - Switch the current workspace +* [kusion workspace update](kusion-workspace-update.md) - Update a workspace configuration + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/_category_.json b/docs/kusion/6-reference/2-modules/1-developer-schemas/_category_.json new file mode 100644 index 00000000..0df3bade --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Schemas" +} \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/app-configuration.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/app-configuration.md new file mode 100644 index 00000000..6808bee7 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/app-configuration.md @@ -0,0 +1,35 @@ +# appconfiguration + +## Schema AppConfiguration + +AppConfiguration is a developer-centric definition that describes how to run an Application.
This application model builds upon a decade of experience at AntGroup running a super-large-scale
internal developer platform, combined with best-of-breed ideas and practices from the community. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**accessories**|{str:any}|Accessories defines a collection of accessories that will be attached to the workload.|{}| +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to resources.|{}| +|**labels**|{str:str}|Labels can be used to attach arbitrary metadata as key-value pairs to resources.|{}| +|**workload** `required`|[service.Service](workload/service#schema-service) \| [wl.Job](workload/job#schema-job) |Workload defines how to run your application code. Currently supported workload profile
includes Service and Job.|N/A| + +### Examples +```python +# Instantiate an App with a long-running service whose image is "nginx:v1" + +import kam as ac +import kam.workload as wl +import kam.workload.container as c + +helloworld : ac.AppConfiguration { +    workload: wl.Service { +        containers: { +            "nginx": c.Container { +                image: "nginx:v1" +            } +        } +    } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/database/mysql.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/database/mysql.md new file mode 100644 index 00000000..8f6135bb --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/database/mysql.md @@ -0,0 +1,39 @@ +# mysql + +## Schema MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider
managed mysql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" \| "cloud"|Type defines whether the mysql database is deployed locally or provided by a
cloud vendor.|| +|**version** `required`|str|Version defines the mysql version to use.|| + +### Examples +```python +# Instantiate a local mysql database with version of 5.7. + +import mysql + +accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of mysql](../../2-workspace-configs/database/mysql.md), and Kusion will automatically concatenate the ``, ``, `` and `mysql` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`. diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/database/postgres.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/database/postgres.md new file mode 100644 index 00000000..ad8cbb7e --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/database/postgres.md @@ -0,0 +1,39 @@ +# postgres + +## Schema PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider
managed postgresql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" \| "cloud"|Type defines whether the postgresql database is deployed locally or provided by a
cloud vendor.|| +|**version** `required`|str|Version defines the postgres version to use.|| + +### Examples +```python +#Instantiate a local postgresql database with image version of 14.0. + +import postgres as postgres + +accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of postgres](../../2-workspace-configs/database/postgres.md), and Kusion will automatically concatenate the ``, ``, `` and `postgres` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`. diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/inference/inference.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/inference/inference.md new file mode 100644 index 00000000..4cdb853a --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/inference/inference.md @@ -0,0 +1,50 @@ +# inference + +## Index + +- v1 + - [Inference](#inference) + +## Schemas + +### Inference + +Inference is a module schema consisting of model, framework and so on + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**framework** `required`|"Ollama" \| "KubeRay"|The framework or environment in which the model operates.|| +|**model** `required`|str|The model name to be used for inference.|| +|**num_ctx**|int|The size of the context window used to generate the next token.|2048| +|**num_predict**|int|Maximum number of tokens to predict when generating text.|128| +|**system**|str|The system message, which will be set in the template.|""| +|**temperature**|float|A parameter determines whether the model's output is more random and creative or more predictable.|0.8| +|**template**|str|The full prompt template, which will be sent to the model.|""| +|**top_k**|int|A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.|40| +|**top_p**|float|A higher value (e.g. 0.9) will give more diverse answers, while a lower value (e.g. 0.5) will be more conservative.|0.9| +#### Examples + +``` +import inference.v1.infer + +accessories: { + "inference@v0.1.0": infer.Inference { + model: "llama3" + framework: "Ollama" + + system: "You are Mario from super mario bros, acting as an assistant." 
+ template: "{{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant" + + top_k: 40 + top_p: 0.9 + temperature: 0.8 + + num_predict: 128 + num_ctx: 2048 + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/common.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/common.md new file mode 100644 index 00000000..8b649196 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/common.md @@ -0,0 +1,17 @@ +# common + +## Schema WorkloadBase + +WorkloadBase defines set of attributes shared by different workload profile, e.g Service
and Job. You can inherit this Schema to reuse these common attributes. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[s.Secret](#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. password, token.|| + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/container.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/container.md new file mode 100644 index 00000000..ce170fc6 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/container.md @@ -0,0 +1,63 @@ +# container + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md new file mode 100644 index 00000000..91123526 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md @@ -0,0 +1,29 @@ +# lifecycle + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| + +### Examples +```python +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md new file mode 100644 index 00000000..64d709cd --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md @@ -0,0 +1,92 @@ +# probe + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) \| [Http](#http) \| [Tcp](#tcp)|The action taken to determine the liveness or health of a container.|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| + +### Examples +```python +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| + +### Examples +```python +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| + +### Examples +```python +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| + +### Examples +```python +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md new file mode 100644 index 00000000..1f13bb85 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md @@ -0,0 +1,29 @@ +# secret + +## Schema Secret + +Secrets are used to provide data that is considered sensitive like passwords, API keys,
TLS certificates, tokens or other credentials. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md new file mode 100644 index 00000000..3e749af9 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md @@ -0,0 +1,30 @@ +# k8s_manifest + +## Schema K8sManifest + +K8sManifest defines the paths of the YAML files, or the directories of the raw Kubernetes manifests, which will be jointly appended to the Resources of Spec. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**paths** `required`|[str]|The paths of the YAML files, or the directories of the raw Kubernetes manifests.|| + +### Examples + +``` +import k8s_manifest + +accessories: { + "k8s_manifest": k8s_manifest.K8sManifest { + paths: [ + # The path of a YAML file. + "/path/to/my/k8s_manifest.yaml", + # The path of a directory containing K8s manifests. + "/dir/to/my/k8s_manifests" + ] + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md new file mode 100644 index 00000000..bf2e551e --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md @@ -0,0 +1,24 @@ +# prometheus + +## Schema Prometheus + +Prometheus can be used to define monitoring requirements + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**path**|str|The path to scrape metrics from.|"/metrics"| +|**port**|str|The port to scrape metrics from. When using Prometheus operator, this needs to be the port NAME. Otherwise, this can be a port name or a number.|container ports when scraping pod (monitorType is pod) and service port when scraping service (monitorType is service)| + +### Examples +```python +import monitoring as m + +"monitoring": m.Prometheus { + path: "/metrics" + port: "web" +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/network/network.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/network/network.md new file mode 100644 index 00000000..daa33121 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/network/network.md @@ -0,0 +1,51 @@ +# network + +## Schema Network + +Network defines the exposed port of Service, which can be used to describe how the Service
get accessed. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**ports**|[[Port](#schema-port)]|The list of ports which the Workload should get exposed.|| + +### Examples +```python +import network as n + +"network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] +} +``` + +## Schema Port + +Port defines the exposed port of Workload, which can be used to describe how the Workload get accessed. + +| name | type | description | default value | +| --- | --- | --- | --- | +|**port** `required`|int|The exposed port of the Workload.|80| +|**protocol** `required`|"TCP" | "UDP"|The protocol to access the port.|"TCP"| +|**public** `required`|bool|Public defines whether the port can be accessed through Internet.|False| +|**targetPort**|int|The backend container port. If empty, set it the same as the port.|| + +### Examples + +```python +import network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md new file mode 100644 index 00000000..8313090a --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md @@ -0,0 +1,35 @@ +# opsrule + +## Schema OpsRule + +OpsRule describes operation rules for various Day-2 Operations. Once declared, these
operation rules will be checked before any Day-2 operations. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**maxUnavailable**|int \| str|The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|"25%"| + +```python +import opsrule as o +import kam.v1.app_configuration +import kam.v1.workload as wl +import kam.v1.workload.container as c + +helloworld : ac.AppConfiguration { + workload: service.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + } + accessories: { + "opsrule" : o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/workload/job.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/workload/job.md new file mode 100644 index 00000000..52194488 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/workload/job.md @@ -0,0 +1,251 @@ +# job + +## Schemas +- [Job](#schema-job) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Job + +Job is a kind of workload profile that describes how to run your application code. This
is typically used for tasks that take from a few seconds to a few days to complete. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:[Container](../internal/container#schema-container)}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**schedule** `required`|str|The scheduling strategy in Cron format. More info: https://en.wikipedia.org/wiki/Cron.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. password, token.|| + +### Examples +```python +# Instantiate a job with the busybox image that runs every hour + +import kam.workload as wl +import kam.workload.container as c + +echoJob : wl.Job { +    containers: { +        "busybox": c.Container{ +            image: "busybox:1.28" +            command: ["/bin/sh", "-c", "echo hello"] +        } +    } +    schedule: "0 * * * *" +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) \| [Http](#http) \| [Tcp](#tcp)|The action taken to determine the liveness or health of a container.|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/1-developer-schemas/workload/service.md b/docs/kusion/6-reference/2-modules/1-developer-schemas/workload/service.md new file mode 100644 index 00000000..8dc74ccf --- /dev/null +++ b/docs/kusion/6-reference/2-modules/1-developer-schemas/workload/service.md @@ -0,0 +1,248 @@ +# service + +## Schemas +- [Service](#schema-service) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Service + +Service is a kind of workload profile that describes how to run your application code. This
is typically used for long-running web applications that should "never" go down, and handle
short-lived latency-sensitive web requests or events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. password, token.|| + +### Examples +```python +# Instantiate a long-running service whose image is "nginx:v1" + +import kam.workload as wl +import kam.workload.container as c + +nginxSvc : wl.Service { +    containers: { +        "nginx": c.Container { +            image: "nginx:v1" +        } +    } +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) \| [Http](#http) \| [Tcp](#tcp)|The action taken to determine whether a container is alive or healthy.|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds the container needs to terminate gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/_category_.json b/docs/kusion/6-reference/2-modules/2-workspace-configs/_category_.json new file mode 100644 index 00000000..81444988 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Workspace Configs" +} \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/database/mysql.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/database/mysql.md new file mode 100644 index 00000000..66225f5b --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/database/mysql.md @@ -0,0 +1,52 @@ +# mysql + +## Module MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider managed mysql database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the mysql database. |str|"root"|optional| +|**category**
Category specifies the edition of the mysql instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the mysql instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the workload connects to the cloud mysql instance through the cloud vendor's private network or over the public network. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the mysql instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud mysql instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# MySQL workspace configs for AWS RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "my-mysql" +``` + +```yaml +# MySQL workspace configs for Alicloud RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "my-mysql" +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/database/postgres.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/database/postgres.md new file mode 100644 index 00000000..aed20616 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/database/postgres.md @@ -0,0 +1,55 @@ +# postgres + +## Module PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider managed postgres database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the postgres database. |str|"root"|optional| +|**category**
Category specifies the edition of the postgres instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the postgres instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the workload connects to the cloud postgres instance through the cloud vendor's private network or over the public network. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the postgres instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud postgres instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# PostgreSQL workspace configs for AWS RDS +modules: + postgres: + path: oci://ghcr.io/kusionstack/postgres + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` + +```yaml +# PostgreSQL workspace configs for Alicloud RDS +modules: + postgres: + path: oci://ghcr.io/kusionstack/postgres + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/inference/inference.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/inference/inference.md new file mode 100644 index 00000000..4cdb853a --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/inference/inference.md @@ -0,0 +1,50 @@ +# inference + +## Index + +- v1 + - [Inference](#inference) + +## Schemas + +### Inference + +Inference is a module schema consisting of model, framework and so on + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**framework** `required`|"Ollama" \| "KubeRay"|The framework or environment in which the model operates.|| +|**model** `required`|str|The model name to be used for inference.|| +|**num_ctx**|int|The size of the context window used to generate the next token.|2048| +|**num_predict**|int|Maximum number of tokens to predict when generating text.|128| +|**system**|str|The system message, which will be set in the template.|""| +|**temperature**|float|A parameter determines whether the model's output is more random and creative or more predictable.|0.8| +|**template**|str|The full prompt template, which will be sent to the model.|""| +|**top_k**|int|A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.|40| +|**top_p**|float|A higher value (e.g. 0.9) will give more diverse answers, while a lower value (e.g. 0.5) will be more conservative.|0.9| +#### Examples + +``` +import inference.v1.infer + +accessories: { + "inference@v0.1.0": infer.Inference { + model: "llama3" + framework: "Ollama" + + system: "You are Mario from super mario bros, acting as an assistant." + template: "{{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant" + + top_k: 40 + top_p: 0.9 + temperature: 0.8 + + num_predict: 128 + num_ctx: 2048 + } +} +``` + + diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md new file mode 100644 index 00000000..ab960c65 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md @@ -0,0 +1,25 @@ +# k8s_manifest + +## Module K8sManifest + +K8sManifest defines the paths of the YAML files, or the directories of the raw Kubernetes manifests, which will be jointly appended to the Resources of Spec. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**paths**
The paths of the YAML files, or the directories of the raw Kubernetes manifests. |[str]|Undefined|**optional**| + +### Examples + +```yaml +modules: + k8s_manifest: + path: oci://ghcr.io/kusionstack/k8s_manifest + version: 0.1.0 + configs: + default: + paths: + - /path/to/k8s_manifest.yaml + - /dir/to/k8s_manifest/ +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md new file mode 100644 index 00000000..55628423 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md @@ -0,0 +1,43 @@ +# monitoring + +`monitoring` can be used to define workspace-level monitoring configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**operatorMode**
Whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines which kinds of resources Kusion manages.|true \| false|false|optional| +|**monitorType**
The kind of monitor to create. It only applies when operatorMode is set to True.|"Service" \| "Pod"|"Service"|optional| +|**interval**
The time interval at which Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping interval can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|30s|optional| +|**timeout**
The timeout when Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping timeout can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|15s|optional| +|**scheme**
The scheme to scrape metrics from. Possible values are http and https.|"http" \| "https"|http|optional| + +### Examples +```yaml +modules: + monitoring: + path: oci://ghcr.io/kusionstack/monitoring + version: 0.2.0 + configs: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + timeout: 1m + projectSelector: + - foo + - bar + high_frequency: + monitorType: Service + interval: 10s + timeout: 5s + projectSelector: + - helloworld + - wordpress + - prometheus-sample-app +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/networking/network.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/networking/network.md new file mode 100644 index 00000000..05609acc --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/networking/network.md @@ -0,0 +1,26 @@ +# network + +`network` can be used to define workspace-level networking configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
The specific cloud vendor that provides the load balancer.| "alicloud" \| "aws"|Undefined|**required**| +| **labels**
The attached labels of the port.|{str:str}|Undefined|optional| +| **annotations**
The attached annotations of the port.|{str:str}|Undefined|optional| + +### Examples + +```yaml +modules: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md new file mode 100644 index 00000000..0c3d29c1 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md @@ -0,0 +1,22 @@ +# opsrule + +`opsrule` can be used to define workspace-level operational rule configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**maxUnavailable**
The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|int \| str|Undefined|optional| + + +### Examples + +```yaml +modules: + opsrule: + path: oci://ghcr.io/kusionstack/opsrule + version: 0.2.0 + configs: + default: + maxUnavailable: "40%" +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/workload/job.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/workload/job.md new file mode 100644 index 00000000..da659136 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/workload/job.md @@ -0,0 +1,26 @@ +# job + +`job` can be used to define workspace-level job configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | + +### Examples +```yaml +modules: + job: + path: oci://ghcr.io/kusionstack/job + version: 0.1.0 + configs: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/2-workspace-configs/workload/service.md b/docs/kusion/6-reference/2-modules/2-workspace-configs/workload/service.md new file mode 100644 index 00000000..9c76a44c --- /dev/null +++ b/docs/kusion/6-reference/2-modules/2-workspace-configs/workload/service.md @@ -0,0 +1,28 @@ +# service + +`service` can be used to define workspace-level service configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | +| **type**
Type represents the type of workload used by this Service. Currently, it supports several
types, including Deployment and CollaSet. |"Deployment" \| "CollaSet"| Deployment |**required**| + +### Examples +```yaml +modules: + service: + path: oci://ghcr.io/kusionstack/service + version: 0.2.0 + configs: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/2-modules/3-naming-conventions.md b/docs/kusion/6-reference/2-modules/3-naming-conventions.md new file mode 100644 index 00000000..ab7f668c --- /dev/null +++ b/docs/kusion/6-reference/2-modules/3-naming-conventions.md @@ -0,0 +1,34 @@ +--- +id: naming-conventions +sidebar_label: Resource Naming Conventions +--- + +# Resource Naming Conventions + +Kusion will automatically create Kubernetes or Terraform resources for the applications, many of which do not require users' awareness. This document will introduce the naming conventions for these related resources. + +## Kubernetes Resources + +Kusion adheres to specific rules when generating the Kubernetes resources for users' applications. The table below lists some common Kubernetes resource naming conventions. Note that `Namespace` can now be specified by users. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| Namespace | `` | v1:Namespace:wordpress-local-db | +| Deployment | ``-``-`` | apps/v1:Deployment:wordpress-local-db:wordpress-local-db-dev-wordpress | +| CronJob | ``-``-`` | batch/v1:CronJob:helloworld:helloworld-dev-helloworld | +| Service | ``-``-``-` or ` | v1:Service:helloworld:helloworld-dev-helloworld-public | + +## Terraform Resources + +Similarly, Kusion also adheres to specific naming conventions when generating the Terraform Resources. Some common resources are listed below. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| random_password | ``-`` | hashicorp:random:random_password:wordpress-db-mysql | +| aws_security_group | ``-`` | hashicorp:aws:aws_security_group:wordpress-db-mysql | +| aws_db_instance | `` | hashicorp:aws:aws_db_instance:wordpress-db | +| alicloud_db_instance | `` | aliyun:alicloud:alicloud_db_instance:wordpress-db | +| alicloud_db_connection | `` | aliyun:alicloud:alicloud_db_connection:wordpress | +| alicloud_rds_account | `` | aliyun:alicloud:alicloud_rds_account:wordpress | + +The `` is composed of two parts, one of which is the `key` of database declared in `AppConfiguration` and the other is the `suffix` declared in `workspace` configuration. Kusion will concatenate the database key and suffix, convert them to uppercase, and replace `-` with `_`. And the `` supported now includes `mysql` and `postgres`. diff --git a/docs/kusion/6-reference/2-modules/_category_.json b/docs/kusion/6-reference/2-modules/_category_.json new file mode 100644 index 00000000..4dadaa75 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Modules" +} diff --git a/docs/kusion/6-reference/2-modules/index.md b/docs/kusion/6-reference/2-modules/index.md new file mode 100644 index 00000000..744892c4 --- /dev/null +++ b/docs/kusion/6-reference/2-modules/index.md @@ -0,0 +1,45 @@ +# Kusion Modules + +KusionStack presets application configuration models described by KCL, where the model is called **Kusion Model**. The GitHub repository [KusionStack/catalog](https://github.com/KusionStack/catalog) is used to store these models, which is known as **Kusion Model Library**. 
+ +The original intention of designing Kusion Model is to enhance the efficiency and improve the experience of YAML users. Through the unified application model defined by code, abstract and encapsulate complex configuration items, omit repetitive and derivable configurations, and supplement with necessary verification logic. Only the necessary attributes get exposed, users get an out-of-the-box, easy-to-understand configuration interface, which reduces the difficulty and improves the reliability of the configuration work. + +Kusion Model Library currently provides the Kusion Model `AppConfiguration`. The design of `AppConfiguration` is developer-centric, based on Ant Group's decades of practice in building and managing hyperscale IDP (Internal Developer Platform), and the best practice of community. `AppConfiguration` describes the full lifecycle of an application. + +A simple example of using `AppConfiguration` to describe an application is as follows: + +```bash +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + "wordpress": c.Container { + image: "wordpress:latest" + env: { + "WORDPRESS_DB_HOST": "secret://wordpress-db/hostAddress" + "WORDPRESS_DB_PASSWORD": "secret://wordpress-db/password" + } + resources: { + "cpu": "1" + "memory": "2Gi" + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "5.7" + size: 20 + instanceType: "mysql.n2.serverless.1c" + category: "serverless_basic" + } +} +``` \ No newline at end of file diff --git a/docs/kusion/6-reference/3-roadmap.md b/docs/kusion/6-reference/3-roadmap.md new file mode 100644 index 00000000..f411009e --- /dev/null +++ b/docs/kusion/6-reference/3-roadmap.md @@ -0,0 +1,15 @@ +# Roadmap + +For a finer-grained view of our roadmap and what is being worked on for a release, please refer to the [Roadmap](https://github.com/orgs/KusionStack/projects/24) + +## Expand Kusion Module Ecosystem to meet more scenarios + +We plan to expand the range of Kusion modules. This includes not only cloud services but also popular cloud-native projects such as Prometheus, Backstage, Crossplane, etc. By leveraging the ecosystem of CNCF projects and Terraform providers, we aim to enrich the Kusion module ecosystem to meet more scenarios. + +## LLM (Large Language Models) Operation + +Kusion is essentially designed to tackle team collaboration challenges. The LLM operations also involve many collaborative tasks. We believe Kusion can boost the operational efficiency of LLM engineers in this setting as well. + +## Kusion Server + +Currently, Kusion is a command-line tool, which has its pros and cons. Through our discussions with community users, we‘ve discovered that some of them prefer a long-running service with a web portal. We’re planning to build this form of Kusion, and have already started developing some features. diff --git a/docs/kusion/6-reference/_category_.json b/docs/kusion/6-reference/_category_.json new file mode 100644 index 00000000..a3b4dd92 --- /dev/null +++ b/docs/kusion/6-reference/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Reference" +} diff --git a/docs/kusion/7-faq/1-install-error.md b/docs/kusion/7-faq/1-install-error.md new file mode 100644 index 00000000..a0fde76a --- /dev/null +++ b/docs/kusion/7-faq/1-install-error.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +--- + +# Installation + +## 1. 
Could not find `libintl.dylib` + +This problem is that some tools depends on the `Gettext` library, but macOS does not have this library by default. You can try to solve it in the following ways: + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. `brew install gettext` +3. Make sure `libintl.8.dylib` exists in `/usr/local/opt/gettext/lib` directory +4. If brew is installed in another directory, the library can be created by copying it to the corresponding directory + +## 2. macOS system SSL related errors + +Openssl dylib library not found or SSL module is not available problem + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. Install openssl (version 1.1) via brew + +``` +brew install openssl@1.1 +``` diff --git a/docs/kusion/7-faq/2-kcl.md b/docs/kusion/7-faq/2-kcl.md new file mode 100644 index 00000000..596aa881 --- /dev/null +++ b/docs/kusion/7-faq/2-kcl.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 2 +--- + +# KCL + +Visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl) for more documents. \ No newline at end of file diff --git a/docs/kusion/7-faq/_category_.json b/docs/kusion/7-faq/_category_.json new file mode 100644 index 00000000..7c4b229f --- /dev/null +++ b/docs/kusion/7-faq/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "FAQ" +} diff --git a/docs/reference/_category_.json b/docs/reference/_category_.json deleted file mode 100644 index 1b337d5a..00000000 --- a/docs/reference/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Reference", - "position": 5 -} diff --git a/docs/reference/cli/index.md b/docs/reference/cli/index.md deleted file mode 100644 index 0486c6ee..00000000 --- a/docs/reference/cli/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Command Line Tools - -The KusionStack provides kusion tools, KCL language tools, and OpenAPI supporting tools. Through these tools, combined with Config, Provider and other capabilities, it provides a complete set of solutions including configuration language, model interface, automation tools, and best practices. diff --git a/docs/reference/cli/kcl/_category_.json b/docs/reference/cli/kcl/_category_.json deleted file mode 100644 index 4c605bd0..00000000 --- a/docs/reference/cli/kcl/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "KCL Tools", - "position": 2 -} diff --git a/docs/reference/cli/kcl/docgen.md b/docs/reference/cli/kcl/docgen.md deleted file mode 100644 index 249c7fa4..00000000 --- a/docs/reference/cli/kcl/docgen.md +++ /dev/null @@ -1,311 +0,0 @@ ---- -sidebar_position: 5 ---- -# Docgen - -The KCL Docgen tool supports extracting model documents from KCL source code and supports multiple output formats: JSON, YAML and Markdown. This article introduces the document specification of the KCL language, gives an example of how to use the KCL Docgen tool to extract documents, and shows the process of importing localization documents. - -## 1. 
Document Specification of KCL - -The documentation of the KCL file mainly contains the following two parts: - -* Current KCL Module document: description of the current KCL file -* All schema documents contained in the KCL file: a description of the current schema, including schema description, schema attribute descriptions, and Examples. The specific format is as follows: - -1. Schema description - - ```python - """This is a brief description of the Schema - """ - ``` - -2. Description of each attribute of Schema: including attribute description, attribute type, default value, optional or required - - ```python - """ - Attributes - ---------- - x : type, default is a, optional. - Description of parameter `x`. - y : type, default is b, required. - Description of parameter `y`. - """ - ``` - - `----------` indicates that `Attributes` is a title (the length of the symbol `-` is the same as the length of the title), the attribute name and attribute type are separated by a colon `:`, the description of the attribute is written on another line with indentation. The default value of the attribute is separated by a comma `,` after the attribute type, and it is written in the form of `default is {default value}`. In addition, it is necessary to indicate whether the attribute is optional/required. Write `optional` after the default value for an optional attribute, and write `required` after the default value for a required attribute. - -3. Examples - - ```python - """ - Examples - -------- - val = Schema { - name = "Alice" - age = 18 - } - """ - ``` - -In addition, the KCL docstring syntax should use a subset of the [re-structured text (reST)](https://docutils.sourceforge.io/rst.html) and be rendered using the [Sphinx](https://www.sphinx-doc.org/en/master/). - -## 2. Generating Documentation From KCL - -Use the `kcl-doc generate` command to extract documentation from a user-specified file or directory and output it to the specified directory. - -1. Args - - ``` - usage: kcl-doc generate [-h] [--format YAML] [-o OUTPUT] [--r] - [--i18n-locale LOCALE] [--repo-url REPO_URL] - [files [files ...]] - - positional arguments: - files KCL file paths. If there's more than one files to - generate, separate them by space - - optional arguments: - -h, --help show this help message and exit - --format YAML Doc file format, support YAML, JSON and MARKDOWN. - Defaults to MARKDOWN - -o OUTPUT, --output-path OUTPUT - Specify the output directory. Defaults to ./kcl_doc - --r, -R, --recursive Search directory recursively - --i18n-locale LOCALE I18n locale, e.g.: zh, zh_cn, en, en_AS. Defaults to - en - --repo-url REPO_URL The source code repository url. It will displayed in - the generated doc to link to the source code. - ``` - -2. Extract documents from the file(s) and output them to the specified directory - - ```text - kcl-doc generate your_config.k your_another_config.k -o your_docs_output_dir - ``` - -3. From the specified directory, recursively find the KCL file(s) and extract the documentation - - ```text - kcl-doc generate your_config_dir -r -o your_docs_output_dir - ``` - -4. When generating documentation, specify the source code repository address. The generated documentation will contain links to source files - - ```text - kcl-doc generate your_config.k -o your_docs_output_dir --repo-url https://url/to/source_code - ``` - -## 3. 
Add Documentation for Localized Languages - -As shown before, by default, the documentation extracted by the documentation generation tool is based on the content of the source docstring, and thus the language of the documentation depends on the language in which the docstring was written. If you need to add localized language documentation to the source file, you can follow the steps below: - -1. Initialize the i18n configuration file. This step generates the corresponding i18n configuration file based on the specified KCL file. The file format can be JSON/YAML, and the default is YAML. The output profile name will end in the specified target localization language - - ```text - kcl-doc init-i18n your_config.k --format JSON --i18n-locale your_target_locale - ``` - -2. Modify the i18n configuration file and update each doc field in your locale language - -3. Generate localized documents from the modified i18n configuration file - - ```text - kcl-doc generate your_config_dir --i18n-locale your_target_locale --format Markdown - ``` - - Next, a simple example is used to demonstrate the process of adding localized language documents. - - 3.1 Prepare the KCL file, such as `server.k`: - - ```python - schema Server: - """Server is the common user interface for long-running - services adopting the best practice of Kubernetes. - - Attributes - ---------- - workloadType : str, default is "Deployment", required - Use this attribute to specify which kind of long-running service you want. - Valid values: Deployment, CafeDeployment. - See also: kusion_models/core/v1/workload_metadata.k. - name : str, required - A Server-level attribute. - The name of the long-running service. - See also: kusion_models/core/v1/metadata.k. - labels : {str:str}, optional - A Server-level attribute. - The labels of the long-running service. - See also: kusion_models/core/v1/metadata.k. - - Examples - ---------------------- - myCustomApp = AppConfiguration { - name = "componentName" - } - """ - - workloadType: str = "Deployment" - name: str - labels?: {str: str} - ``` - - 3.2 Get the initialized i18n configuration file from the `server.k`. For example, if you want to add Chinese documents to it, specify the format of the generated configuration file as YAML - - ```text - kcl init-i18n server.k --format YAML --i18n-locale zh_cn - ``` - - This command will create the directory `kcl_doc` under the current directory and generate the i18n configuration file `kcl_doc/i18n_server_zh_cn.yaml`. Its contents are as follows: - - ```yaml - name: server - relative_path: ./server.k - schemas: - - name: Server - doc: | - Server is the common user interface for long-running - services adopting the best practice of Kubernetes. - attributes: - - name: workloadType - doc: | - Use this attribute to specify which kind of long-running service you want. - Valid values: Deployment, CafeDeployment. - See also: kusion_models/core/v1/workload_metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - default_value: '"Deployment"' - is_optional: false - - name: name - doc: | - A Server-level attribute. - The name of the long-running service. - See also: kusion_models/core/v1/metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: false - default_value: '' - - name: labels - doc: | - A Server-level attribute. - The labels of the long-running service. - See also: kusion_models/core/v1/metadata.k. 
- type: - type_str: '{str: str}' - type_category: DICT - dict_type: - key_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - value_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: true - default_value: '' - examples: | - myCustomApp = AppConfiguration { - name = "componentName" - } - doc: '' - source_code_url: '' - ``` - - 3.3 Modify all the `doc` fields to the Chinese description. The modified configuration is as follows: - - ```yaml - name: server - relative_path: ./server.k - schemas: - - name: Server - doc: | - Server 模型定义了采用 Kubernetes 最佳实践的持续运行的服务的通用配置接口 - attributes: - - name: workloadType - doc: | - workloadType 属性定义了服务的类型,是服务级别的属性。合法的取值有:Deployment, CafeDeployment. - 另请查看:kusion_models/core/v1/workload_metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - default_value: '"Deployment"' - is_optional: false - - name: name - doc: | - name 为服务的名称,是服务级别的属性。 - 另请查看:kusion_models/core/v1/metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: false - default_value: '' - - name: labels - doc: | - labels 为服务的标签,是服务级别的属性。 - 另请查看:kusion_models/core/v1/metadata.k. - type: - type_str: '{str: str}' - type_category: DICT - dict_type: - key_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - value_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: true - default_value: '' - examples: | - myCustomApp = AppConfiguration { - name = "componentName" - } - doc: '' - source_code_url: '' - ``` - - 3.4 Based on the modified i18n configuration, generate documents in localized languages. Execute the following command to output the Chinese document `kcl_doc/doc_server_zh_cn.md`. The commands and the contents of the generated documents are as follows: - - ```text - kcl-doc generate server.k --i18n-locale zh_cn --format Markdown - ``` - - ~~~markdown - # server - ## Schema Server - Server 模型定义了采用 Kubernetes 最佳实践的持续运行的服务的通用配置接口 - - ### Attributes - |Name and Description|Type|Default Value|Required| - |--------------------|----|-------------|--------| - |**workloadType**
workloadType 属性定义了服务的类型,是服务级别的属性。合法的取值有:Deployment, CafeDeployment.
另请查看:kusion_models/core/v1/workload_metadata.k.|str|"Deployment"|**required**| - |**name**
name 为服务的名称,是服务级别的属性。
另请查看:kusion_models/core/v1/metadata.k.|str|Undefined|**required**| - |**labels**
labels 为服务的标签,是服务级别的属性。
另请查看:kusion_models/core/v1/metadata.k.|{str: str}|Undefined|optional| - ### Examples - ``` - myCustomApp = AppConfiguration { - name = "componentName" - } - ``` - - - ~~~ - -## 4. Appendix - -### 1. Concept of reST - -For documents in reST format, paragraphs and indentation are important, new paragraphs are marked with blank lines, and indentation is the indentation indicated in the output. Font styles can be expressed as follows: - -* \*Italic\* -* \*\*Bold\*\* -* \`\`Monospaced\`\` - -Refer to [reST](https://docutils.sourceforge.io/rst.html) for more information. diff --git a/docs/reference/cli/kcl/fmt.md b/docs/reference/cli/kcl/fmt.md deleted file mode 100644 index 51b9e009..00000000 --- a/docs/reference/cli/kcl/fmt.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Format - -The KCL Format tool supports reformatting KCL files to the standard code style. This article demonstrates the KCL code style and how to use the KCL Format tool. - -## Code Style - -The KCL Format tool modifies the files according to the KCL code style: [Style Guide for KCL Code](../../lang/lang/spec/codestyle.md) - -## How to use - -* Formatting Single File - -```text -kcl-fmt your_config.k -``` - -* Formatting multiple files - -```text -kcl-fmt your_config_path -R -``` - -* Args - * `-R|--recursive` Whether to recursively traverse subfolders - * `-w|--fmt-output` Whether to output to STDOUT, without `-w` indicates in-place modification. - -## Display of formatting files - -* Before formatting - -```py -import math -mixin DeploymentMixin: - service:str ="my-service" -schema DeploymentBase: - name: str - image : str -schema Deployment[replicas] ( DeploymentBase ) : - mixin[DeploymentMixin] - replicas : int = replicas - command: [str ] - labels: {str: str} -deploy = Deployment(replicas = 3){} -``` - -* After formatting - -```py -import math - -mixin DeploymentMixin: - service: str = "my-service" - -schema DeploymentBase: - name: str - image: str - -schema Deployment[replicas](DeploymentBase): - mixin [DeploymentMixin] - replicas: int = replicas - command: [str] - labels: {str:str} - -deploy = Deployment(replicas=3) {} - -``` diff --git a/docs/reference/cli/kcl/index.md b/docs/reference/cli/kcl/index.md deleted file mode 100644 index 2d7df571..00000000 --- a/docs/reference/cli/kcl/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# KCL Language Tools - -As the configuration policy language of Kusion, KCL not only provides the command `kcl` to compile and execute configuration programs but also provides fmt, lint, test, vet, docgen and other supporting tools. diff --git a/docs/reference/cli/kcl/lint.md b/docs/reference/cli/kcl/lint.md deleted file mode 100644 index 651ccb47..00000000 --- a/docs/reference/cli/kcl/lint.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -sidebar_position: 3 ---- - -# Lint - -The KCL Lint tool supports checking some warning-level defects in KCL code and supports multiple output formats. This document shows how to use the KCL Lint tool. - -## Example - -### Project Struct - -```text -. -└── Test - └── kcl.mod - └── .kcllint - └── a.k - └── b.k - └── dir - └── c.k - └── test.k -``` - -`.kcllint` is the configuration file of lint and it is optional. `a.k`, `b.k`, `c.k` and `test.k` are the kcl file to be checked. 
- -Args: - -```shell -kcl-lint your_config.k -``` - -or - -```shell -kcl-lint your_config_path -``` - -lint configuration file - -```shell -kcl-lint --config abspath/.kcllint your_config.k -``` - -Output: - -```shell -/Users/../test.k:12:1: E0501 line too long (122 > 120 characters) -# line too long, line too long, line too long, line too long, line too long, line too long, line too long, line too long, -^ - -/Users/../test.k:14:1: E0413 Import b should be placed at the top of the module -import b -^ - - -Check total 1 files: -1 E0413: ImportStmt is not at the top of the file -1 E0501: Line too long -KCL Lint: 2 problems -``` - -## KCL Lint Tool - -### Args - -```shell -usage: kcl-lint [-h] [--config file] [file] - -positional arguments: - file KCL file path - -optional arguments: - -h, --help show this help message and exit - --config file KCL lint config path -``` - -+ --config: path of `.kcllint` -+ file: the path of a single `*.k` file or directory to be checked. Support the absolute path or relative path of the current directory. - -### Lint Configuration - -#### Priority - -The priority of Lint's configuration is as follows: - -1. the `.kcllint` set in CLI Args -2. the `.kcllint` under the directory of checked `.k` file or checked directory -3. default configuration - -#### .kcllint - -The file `.kcllint` is written in YAML. Its contents include: - -+ check_list: kinds of checks, including `"import"` and `"misc"` -+ ignore: ignored check items. See the `Error Code` for optional items. -+ max_line_length: check parameter, that is, the maximum length of code -+ output: output streams and formats, including `"stdout"`、`"file"` and `"sarif"` -+ output_path: The path of the output file. It is optional, but it is required when the `output` is set as `"file"` or `"sarif"` - -Example: - -```yaml -check_list: ["import","misc"] -ignore: ["E0501"] -max_line_length: 120 -output: ["stdout"] -output_path: -``` - -#### Default Configuration: - -```yaml -check_list: ["import", "misc"] -ignore": [] -max_line_length: 200 -output: ["stdout"] -``` - -### Error Code - -+ import_checker - + E0401: Unable to import. - + W0401: Reimport. - + E0406: Module import itself. - + W0411: Import but unused. - + E0413: ImportStmt is not at the top of the file -+ misc_checker - + E0501: Line too long diff --git a/docs/reference/cli/kcl/overview.md b/docs/reference/cli/kcl/overview.md deleted file mode 100644 index 25ab5bf2..00000000 --- a/docs/reference/cli/kcl/overview.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Overview - -KCL toolchain is a toolset of KCL language, which aims to improve the efficiency of batch migration, writing, compiling and testing of KCL. - -| | Name | Description | -| ---------------- | ------------------------ | ----------------------------------------------------------------------- | -| Main Toolset | **kcl** | Provide support for KCL in coding, compiling and testing | -| | kcl build | Build KCL code(not yet) | -| | kcl test | Provide unit test(not yet) and integration test | -| | kcl fmt | Formoat KCL code | -| | kcl list | Parse the KCL code and lists the option parameter/schema attributes information. 
(kcl list-options and kcl list-attributes)| -|Automation Toolset| kcl-lint | Check code style for KCL | -| | kcl-doc | Parses the KCL code and generate documents | -| | kcl-fmt | Same as `kcl fmt` | -| IDE Plugin | IntelliJ IDEA KCL plugin | Provide assistance for KCL in coding and compiling on IntelliJ IDEA | -| | VS Code KCL plugin | Provide assistance for KCL in coding and compiling on VS Code | diff --git a/docs/reference/cli/kcl/test.md b/docs/reference/cli/kcl/test.md deleted file mode 100644 index 11c99e06..00000000 --- a/docs/reference/cli/kcl/test.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -sidebar_position: 5 ---- - -# Test Tool - -## Intro - -The KCL Test tool and `testing` package provide a simple testing framework to test KCL code. All KCL files in each directory are a set of tests, and each schema starts with `Test` in each `test.k` is a test case. - -## How to use - -There is a KCL file `hello.k`: - -```python -schema Person: - name: str = "kcl" - age: int = 1 - -hello = Person { - name = "hello kcl" - age = 102 -} -``` - -Build a test file `hello_test.k`: - -```python -schema TestPerson: - a = Person{} - assert a.name == 'kcl' - -schema TestPerson_age: - a = Person{} - assert a.age == 1 - -schema TestPerson_ok: - a = Person{} - assert a.name == "kcl" - assert a.age == 1 -``` - -Execute the following command: - -``` -$ kcl-test -ok /pkg/to/app [365.154142ms] -$ -``` - -## Failed Test Case - -Modify `hello_test.k` to the following code to build failed test case: - -```python -# Copyright 2021 The KCL Authors. All rights reserved. - -import testing - -schema TestPerson: - a = Person{} - assert a.name == 'kcl2' - -schema TestPerson_age: - a = Person{} - assert a.age == 123 - -schema TestPerson_ok: - a = Person{} - assert a.name == "kcl2" - assert a.age == 1 -``` - -Output: - -``` -$ kcl-test -FAIL /pkg/to/app [354.153775ms] ----- failed [48.817552ms] - KCL Runtime Error: File /pkg/to/app/hello_test.k:7: - assert a.name == 'kcl2' - Assertion failure ----- failed [47.515009ms] - KCL Runtime Error: File /pkg/to/app/hello_test.k:11: - assert a.age == 123 - Assertion failure ----- failed [47.26677ms] - KCL Runtime Error: File /pkg/to/app/hello_test.k:15: - assert a.name == "kcl2" - Assertion failure -$ -``` - -## Option Args -Literal type command line arguments can be specified via the testing package: - -```python -schema TestOptions: - testing.arguments("name", "ktest") - testing.arguments("age", "123") - testing.arguments("int0", 0) - testing.arguments("float0", 0.0) - testing.arguments("bool-true", True) - testing.arguments("bool-false", False) - - name = option("name") - assert name == "ktest" - - age = option("age") - assert age == 123 - - assert option("int0") == 0 - assert option("float0") == 0.0 - assert option("bool-true") == True - assert option("bool-false") == False -``` - -`testing.arguments` defines a set of key-value arguments, valid only in the current test. - -The option arguments can also be loaded from the `settings.yaml`. There is a file `settings.yaml`: - -```yaml - - key: app-name - value: app - - key: env-type - value: prod - - key: image - value: reg.docker.inc.com/test-image -``` - -Parameters can then be configured via `testing.setting_file("./settings.yaml")`. 
At the same time, `testing.arguments()` is still supported to override the parameters in the configuration file: - -```py -schema TestOptions_setting: - testing.setting_file("./settings.yaml") - testing.arguments("file", "settings.yaml") - - assert option("app-name") == "app" - assert option("file") == "settings.yaml" -``` - -## Plugin Test - -Automatically switch to plugin mode if the directory to be tested contains `plugin.py` and test files. Then set the environment variable `KCL_PLUGINS_ROOT` before the test (plugins in other directories can no longer be accessed) to test the current plugin, and restore the previous `KCL_PLUGINS_ROOT` after the test is completed. - -## Integration Test - -Automatically execute integration tests when the directory contains `*.k`. If there is `stdout.golden` then verify the output. If there is `stderr.golden` then verify the error. Supports the `settings.yaml` file to define command line arguments. - -## Batch Test - -+ `kcl-test path` Execute the test of the specified directory. It can be omitted if it's the same directory that the command is executed -+ `kcl-test -run=regexp` Execute the test which matches patterns -+ `kcl-test ./...` Execute unit tests recursively - -## Args - -``` -$ kcl-test -h -NAME: - kcl-go test - test packages - -USAGE: - kcl-go test [command options] [packages] - -OPTIONS: - --run value Run only those tests matching the regular expression. - --quiet, -q Set quiet mode (default: false) - --verbose, -v Log all tests as they are run (default: false) - --debug, -d Run in debug mode (for developers only) (default: false) - --help, -h show help (default: false) -``` diff --git a/docs/reference/cli/kcl/vet.md b/docs/reference/cli/kcl/vet.md deleted file mode 100644 index 2ca49bec..00000000 --- a/docs/reference/cli/kcl/vet.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Validation - -## Intro - -The KCL Validation tool supports basic configuration data verification capabilities. You can write a KCL schema to verify the type and value of the input JSON/YAML files. - -## How to use - -There is a JSON format file `data.json`: - -```json -{ - "name": "Alice", - "age": "18", - "message": "This is Alice", - "data": { - "id": "1", - "value": "value1" - }, - "labels": { - "key": "value" - }, - "hc": [1, 2, 3] -} -``` - -Build a validate KCL file `schema.k`: - -```py -schema User: - name: str - age: int - message?: str - data: Data - labels: {str:} - hc: [int] - - check: - age > 10 - -schema Data: - id: int - value: str -``` - -Execute the following command: - -``` -$ kcl-vet data.json schema.k -Validate succuss! -``` - -## Specify the schema for validation - -When multiple schema definitions exist in the KCL file, by default, the KCL Validation tool will use the first schema to check. 
If you need to specify a schema for verification, you can use the `-d|--schema` parameter - -``` -$kcl-vet data.json schema.k -d User -``` - -## Args - -``` -$ kcl-vet -h -usage: kcl-vet [-h] [-d schema] [--format format] [-n attribute_name] - data_file kcl_file - -positional arguments: - data_file Validation data file - kcl_file KCL file - -optional arguments: - -h, --help show this help message and exit - -d schema, --schema schema - --format format Validation data file format, support YAML and JSON - -n attribute_name, --attribute-name attribute_name -``` diff --git a/docs/reference/cli/kusionctl/_category_.json b/docs/reference/cli/kusionctl/_category_.json deleted file mode 100644 index 7040358e..00000000 --- a/docs/reference/cli/kusionctl/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Kusion Tools", - "position": 1 -} diff --git a/docs/reference/cli/kusionctl/apply.md b/docs/reference/cli/kusionctl/apply.md deleted file mode 100644 index 00e02587..00000000 --- a/docs/reference/cli/kusionctl/apply.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -sidebar_position: 6 ---- -# Apply - -通过工作目录将配置栈应用到一个或多个资源 - -### Synopsis - -将一系列资源变更应用到当前栈中。 - -在 Konfig 栈中根据 KCL 文件创建、更新、删除资源。 默认情况下,在采取任何行动之前,kusion 会生成一个执行计划,并提交给你批准。 - -你可以检查计划详细信息,然后决定是否应采取或中止这些操作。 - -``` -kusion apply [flags] -``` - -### Examples - -``` - # 指定工作目录应用 - kusion apply -w /path/to/workdir - - # 指定参数应用 - kusion apply -D name=test -D age=18 - - # 指定配置文件应用 - kusion apply -Y settings.yaml - - # 应用前跳过计划的交互式审批 - kusion apply —yes -``` - -### Options - -``` - -D, --argument strings 指定参数应用 KCL 代码 - -d, --detail 预览后自动展示 apply 计划细节 - -h, --help help for apply - --operator string 指定操作人 - -O, --overrides strings 指定配置覆盖路径和值 - -Y, --setting strings 指定命令行配置文件 - -w, --workdir string 指定工作目录 - -y, --yes 预览后自动审批并应用更新 -``` - -### Options inherited from parent commands - -``` - --log-level string 设置 kusion 开发日志级别,默认为 INFO,所有选项:DEBUG、INFO、ERROR、WARN、FATAL (default "INFO") - --profile string 要捕获的档案名称。none、cpu、heap、goroutine、threadcreate、block 和 mutex 之一 (default "none") - --profile-output string 档案写入的文件名 (default "profile.pprof") -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion 通过代码管理 Kubernetes - -###### Auto generated by spf13/cobra on 21-Jan-2022 diff --git a/docs/reference/cli/kusionctl/compile.md b/docs/reference/cli/kusionctl/compile.md deleted file mode 100644 index 35a6f7fc..00000000 --- a/docs/reference/cli/kusionctl/compile.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -sidebar_position: 4 ---- -# Compile - -将 KCL 编译成 YAML - -### Synopsis - -编译一个或多个 KCL 文件。 - -必须指定 KCL 文件名。 你可以指定参数列表来替换 KCL 中定义的占位符, 并在使用 --output 标志时将编译结果输出到文件。 - -``` -kusion compile [flags] -``` - -### Examples - -``` - # 将 main.k 中的配置编译成 YAML 格式 - kusion compile main.k - - # 使用参数编译 main.k - kusion compile main.k -D name=test -D age=18 - - # 使用来自 settings.yaml 的参数编译 main.k - kusion compile main.k -Y settings.yaml - - # 使用工作目录编译 main.k - kusion compile main.k -w Konfig/appops/demo/dev - - # Override 当前 Stack 中的 KCL 配置 - kusion compile -O __main__:appConfiguration.image=nginx:latest -a - - # 编译 main.k 并将结果写入 output.yaml - kusion compile main.k -o output.yaml -``` - -### Options - -``` - -D, --argument strings 指定顶级参数 - -n, --disable-none 禁用转储 None 值 - -h, --help help for compile - -o, --output string 指定输出文件 - -a, --override-AST 指定覆盖选项 - -O, --overrides strings 指定配置覆盖路径和值 - -Y, --setting strings 指定命令行配置文件 - -w, --workdir string 指定工作目录 -``` - -### Options inherited from parent commands - -``` - --log-level string 设置 kusion 开发日志级别,默认为 
INFO,所有选项:DEBUG、INFO、ERROR、WARN、FATAL (default "INFO") - --profile string 要捕获的档案名称。none、cpu、heap、goroutine、threadcreate、block 和 mutex 之一 (default "none") - --profile-output string 档案写入的文件名 (default "profile.pprof") -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion 通过代码管理 Kubernetes - -###### Auto generated by spf13/cobra on 21-Jan-2022 diff --git a/docs/reference/cli/kusionctl/destroy.md b/docs/reference/cli/kusionctl/destroy.md deleted file mode 100644 index 2bdf4303..00000000 --- a/docs/reference/cli/kusionctl/destroy.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -sidebar_position: 7 ---- -# Destroy - -通过工作目录销毁已应用到资源的配置栈 - -### Synopsis - -通过资源规约删除资源。 - -只接受 KCL 文件。只能指定一种类型的参数:文件名、资源、名称、资源或标签选择器。 - -请注意,destroy 命令不会进行资源版本检查, 因此如果有人在你提交销毁时提交了对资源的更新, 他们的更新将与资源一起丢失。 - -``` -kusion destroy [flags] -``` - -### Examples - -``` - # Delete the configuration of current stack - kusion destroy -``` - -### Options - -``` - -D, --argument strings 指定编译 KCL 的参数 - -d, --detail 预览后自动展示 apply 计划细节 - -h, --help help for destroy - --operator string 指定操作人 - -O, --overrides strings 指定配置覆盖路径和值 - -Y, --setting strings 指定命令行配置文件 - -w, --workdir string 指定工作目录 - -y, --yes 预览后自动审批并应用更新 -``` - -### Options inherited from parent commands - -``` - --log-level string 设置 kusion 开发日志级别,默认为 INFO,所有选项:DEBUG、INFO、ERROR、WARN、FATAL (default "INFO") - --profile string 要捕获的档案名称。none、cpu、heap、goroutine、threadcreate、block 和 mutex 之一 (default "none") - --profile-output string 档案写入的文件名 (default "profile.pprof") -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion 通过代码管理 Kubernetes - -###### Auto generated by spf13/cobra on 21-Jan-2022 diff --git a/docs/reference/cli/kusionctl/diff.md b/docs/reference/cli/kusionctl/diff.md deleted file mode 100644 index e1fad05e..00000000 --- a/docs/reference/cli/kusionctl/diff.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -sidebar_position: 5 ---- -# Diff - -比较输入文件 `` 和 `` 之间的差异 - -### Synopsis - -比较文件差异并显示增量。 支持的文件类型有: YAML ([http://yaml.org/](http://yaml.org/)) 和 JSON ([http://json.org/](http://json.org/))。 - -``` -kusion diff [flags] -``` - -### Examples - -``` - # 比较对象来自文件 - kusion diff pod-1.yaml pod-2.yaml - kusion diff pod-1.yaml pod-2.yaml --swap=true - - # 比较对象来自标准输入 - cat pod-1.yaml > pod-full.yaml - echo '---' >> pod-full.yaml - cat pod-2.yaml >> pod-full.yaml - cat pod-full.yaml | kusion diff - -``` - -### Options - -``` - --diff-mode string 差异模式。normal 和 ignore-added 之一。默认值 normal (default "normal") - -h, --help help for diff - -i, --ignore-order-changes 忽略列表中的顺序变化,默认为否 - -b, --omit-header 省略 dyff 摘要标题,默认为否 - -o, --output string 指定输出风格,human 和 raw 之一,默认值 human (default "human") - -k, --sort-by-kubernetes-resource 按 kubernetes 资源顺序排序(非标准行为)。默认为否 (default true) - --swap 交换 进行比较。注意 为标准输入时无效。默认为否 -``` - -### Options inherited from parent commands - -``` - --log-level string 设置 kusion 开发日志级别,默认为 INFO,所有选项:DEBUG、INFO、ERROR、WARN、FATAL (default "INFO") - --profile string 要捕获的档案名称。none、cpu、heap、goroutine、threadcreate、block 和 mutex 之一 (default "none") - --profile-output string 档案写入的文件名 (default "profile.pprof") -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion 通过代码管理 Kubernetes - -###### Auto generated by spf13/cobra on 21-Jan-2022 diff --git a/docs/reference/cli/kusionctl/index.md b/docs/reference/cli/kusionctl/index.md deleted file mode 100644 index e8ec4809..00000000 --- a/docs/reference/cli/kusionctl/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kusion Tools - -The Kusion tool is a command line interface for upper-level users. 
It encapsulates common workflow tools such as init, validate, compile, apply, and destroy based on business abstract models such as Project & Stack. diff --git a/docs/reference/cli/kusionctl/init.md b/docs/reference/cli/kusionctl/init.md deleted file mode 100644 index 2c8914b4..00000000 --- a/docs/reference/cli/kusionctl/init.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -sidebar_position: 2 ---- -# Init Project - -初始化一个新项目,包含 KCL 文件结构和基础代码 - -### Synopsis - -kusion init 命令可帮助你生成 KCL 脚手架项目。 尝试 "kusion init" 获取一个简单的演示项目。 - -``` -kusion init -``` - -### Examples - -``` - # 使用默认架构初始化新的 KCL 项目 - kusion init -``` - -### Options - -``` - -h, --help help for init - --schema string 指定初始化 KCL 项目的模式类型。默认值 Server (default "Server") -``` - -### Options inherited from parent commands - -``` - --log-level string 设置 kusion 开发日志级别,默认为 INFO,所有选项:DEBUG、INFO、ERROR、WARN、FATAL (default "INFO") - --profile string 要捕获的档案名称。none、cpu、heap、goroutine、threadcreate、block 和 mutex 之一 (default "none") - --profile-output string 档案写入的文件名 (default "profile.pprof") -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion 通过代码管理 Kubernetes - -###### Auto generated by spf13/cobra on 21-Jan-2022 diff --git a/docs/reference/cli/kusionctl/overview.md b/docs/reference/cli/kusionctl/overview.md deleted file mode 100644 index 0877465f..00000000 --- a/docs/reference/cli/kusionctl/overview.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Overview - -## 1. 前言 - -Kusion CLI 即 Kusion 工具链的简称,是基于 KCL 的 DevOps 工具集合,主要包括主工具、转换工具集、插件集等。 - -Kusion CLI 的边界: - -* Kusion CLI 聚焦 DevOps 领域 -* 基于 KCL,非 KCL 的配置语言需要先转换为 KCL 才能识别 -* 多 Runtime,提供的能力一定是多 Runtime 间通用的 -* 定制能力通过 Plugin 提供 - -## 2. Kusion CLI 包括什么? - -| 类别 | 工具名称 | 说明 | -| ---------- | --------------- | -------------------------------------------------------------------------------- | -| 主工具集 | **kusionctl** | kusionctl 是基于 KCL 语言的 DevOps 工具,管理 KCL 配置从生成到生效的整个生命周期 | -| | **kusionup** | kusionup 是一个优雅的 kusion 多版本管理工具 | -| 转换工具集 | **kube2kcl** | 从 yaml/kustomize 转换成 KCL 配置的工具 | -| | **crd2kcl** | 从 crd yaml 转换为 KCL 模型定义的工具 | -| | **tf2kcl** | 从 tf 转换为 KCL 模型定义的工具 | -| | **openapi2kcl** | 从 openapi yaml 转换为 KCL 模型定义的工具 | -| 插件集 | | 可无缝集成 kusionctl 的 plugin | - -## 3. 
Kusionctl 子命令概览 - -kusionctl 是基于 KCL 语言的 DevOps 工具,管理 KCL 配置的生命周期 - -| 能力类别 | 能力名称 | 包含命令 | 说明 | -| -------- | ------------ | -------- | ----------------------------------------------------------------------------------------------------------- | -| 配置 | 脚手架 | init | kusion init 命令用于从指定模板初始化一个工作目录,其中包括 KCL 描述的配置清单、project.yaml、stack.yaml 等 | -| | 语言集成 | compile | kusion compile 命令用于编译指定的 KCL 配置文件以查看结果是否符合预期 | -| | 配置语法校验 | validate | kusion validate 命令用于验证当前目录的 KCL 配置是否能够正常编译,而不和 backend、state、provider 进行交互; | -| | 元信息查看 | ls | kusion ls 命令用于查看 Project 和 Stack 基本信息 | -| 运行时 | 资源管理 | preview | kusion preview 命令用于预览即将发布的配置,包括资源动作 | -| | | apply | kusion apply 命令用于将配置生效,该命令执行后会先执行 preview,确认后才会真正下发配置 | -| | | delete | kusion delete 命令用于将配置对应的资源删除,该命令会先执行 preview,确认后才会真正删除资源 | -| | | watch | kusion watch 命令用于查看当前 Stack 的资源状态 | -| 其它 | 插件管理 | plugin | kusion plugin 用于管理本地安装的 plugin | -| | 版本信息 | version | kusion version 用于显示版本信息 | -| | 环境变量信息 | env | kusion env 用于查看 kusion 相关的环境变量 | - -备注: - -* **配置**:是指用 KCL 描述的配置清单 - ​ diff --git a/docs/reference/cli/kusionctl/preview.md b/docs/reference/cli/kusionctl/preview.md deleted file mode 100644 index 43f05ba4..00000000 --- a/docs/reference/cli/kusionctl/preview.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -sidebar_position: 6 ---- -# Preview - -Preview a series of resource changes within the stack - -### Synopsis - -Preview a series of resource changes within the stack. - -Create or update or delete resources according to the KCL files within a stack. By default, Kusion will generate an execution plan and present it for your approval before taking any action. - -``` -kusion preview [flags] -``` - -### Examples - -``` - # Preview with specifying work directory - kusion preview -w /path/to/workdir - - # Preview with specifying arguments - kusion preview -D name=test -D age=18 - - # Preview with specifying setting file - kusion preview -Y settings.yaml - - # Preview with ignored fields - kusion preview --ignore-fields="metadata.generation,metadata.managedFields" -``` - -### Options - -``` - -D, --argument stringArray Specify the top-level argument - -C, --backend-config strings backend-config config state storage backend - --backend-type string backend-type specify state storage backend - -d, --detail Automatically show plan details after previewing it - -h, --help help for preview - --ignore-fields strings Ignore differences of target fields - --operator string Specify the operator - -O, --overrides strings Specify the configuration override path and value - -Y, --setting strings Specify the command line setting files - -w, --workdir string Specify the work directory -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion manages the Kubernetes cluster by code - -###### Auto generated by spf13/cobra on 13-Sep-2022 diff --git a/docs/reference/cli/kusionctl/validate.md b/docs/reference/cli/kusionctl/validate.md deleted file mode 100644 index 3b9ff658..00000000 --- a/docs/reference/cli/kusionctl/validate.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -sidebar_position: 3 ---- -# Validate - -校验当前目录的 KCL 配置是否可以编译 - -### Synopsis - -验证当前目录中的 KCL 配置是否可以编译, 并且不与任何 backend/state/provider 交互。 - -``` -kusion validate [flags] -``` - -### Examples - -``` - # 验证 main.k 中的配置 - kusion validate main.k - - # 使用参数验证 main.k - kusion validate main.k -D name=test -D age=18 - - # 使用来自 settings.yaml 的参数验证 main.k - kusion validate main.k -Y settings.yaml - - # 使用工作目录验证 main.k - kusion validate main.k -w Konfig/appops/demo/dev -``` - -### Options - -``` - -D, --argument strings 指定顶级参数 - 
-n, --disable-none 禁用转储 None 值 - -h, --help help for validate - -a, --override-AST 指定覆盖选项 - -O, --overrides strings 指定配置覆盖路径和值 - -Y, --setting strings 指定命令行配置文件 - -w, --workdir string 指定工作目录 -``` - -### Options inherited from parent commands - -``` - --log-level string 设置 kusion 开发日志级别,默认为 INFO,所有选项:DEBUG、INFO、ERROR、WARN、FATAL (default "INFO") - --profile string 要捕获的档案名称。none、cpu、heap、goroutine、threadcreate、block 和 mutex 之一 (default "none") - --profile-output string 档案写入的文件名 (default "profile.pprof") -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion 通过代码管理 Kubernetes - -###### Auto generated by spf13/cobra on 21-Jan-2022 diff --git a/docs/reference/cli/kusionctl/version.md b/docs/reference/cli/kusionctl/version.md deleted file mode 100644 index 47561c96..00000000 --- a/docs/reference/cli/kusionctl/version.md +++ /dev/null @@ -1,41 +0,0 @@ -# Version Info - -打印 kusion 的版本信息 - -### Synopsis - -打印当前会话 kusion 的版本信息。 - -``` -kusion version [flags] -``` - -### Examples - -``` - # 打印 kusion 版本 - kusion version -``` - -### Options - -``` - -h, --help help for version - -j, --json 将版本信息打印成 JSON 格式 - -s, --short 将版本信息打印成短版本字符串 - -y, --yaml 将版本信息打印成 YAML 格式 -``` - -### Options inherited from parent commands - -``` - --log-level string 设置 kusion 开发日志级别,默认为 INFO,所有选项:DEBUG、INFO、ERROR、WARN、FATAL (default "INFO") - --profile string 要捕获的档案名称。none、cpu、heap、goroutine、threadcreate、block 和 mutex 之一 (default "none") - --profile-output string 档案写入的文件名 (default "profile.pprof") -``` - -### SEE ALSO - -* [kusion](./overview.md) - kusion 通过代码管理 Kubernetes - -###### Auto generated by spf13/cobra on 21-Jan-2022 diff --git a/docs/reference/cli/openapi/_category_.json b/docs/reference/cli/openapi/_category_.json deleted file mode 100644 index 58e4c68a..00000000 --- a/docs/reference/cli/openapi/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "OpenAPI Tools", - "position": 3 -} diff --git a/docs/reference/cli/openapi/crd-to-kcl.md b/docs/reference/cli/openapi/crd-to-kcl.md deleted file mode 100644 index 2496162f..00000000 --- a/docs/reference/cli/openapi/crd-to-kcl.md +++ /dev/null @@ -1,112 +0,0 @@ -# CRD to KCL - -命令 - -```shell -kcl-openapi generate model --crd -f ${your_CRD.yaml} -t ${the_kcl_files_output_dir} --skip-validation -``` - -# 示例 - -- 输入文件:test_crontab_CRD.yaml: - -```yaml -# Deprecated in v1.16 in favor of apiextensions.k8s.io/v1 -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - # name must match the spec fields below, and be in the form: . - name: crontabs.stable.example.com -spec: - # group name to use for REST API: /apis// - group: stable.example.com - # list of versions supported by this CustomResourceDefinition - versions: - - name: v1 - # Each version can be enabled/disabled by Served flag. - served: true - # One and only one version must be marked as the storage version. - storage: true - # either Namespaced or Cluster - scope: Namespaced - names: - # plural name to be used in the URL: /apis/// - plural: crontabs - # singular name to be used as an alias on the CLI and for display - singular: crontab - # kind is normally the CamelCased singular type. Your resource manifests use this. 
- kind: CronTab - # shortNames allow shorter string to match your resource on the CLI - shortNames: - - ct - preserveUnknownFields: false - validation: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - cronSpec: - type: string - image: - type: string - replicas: - type: integer -``` - -- 命令 - -```shell -kcl-openapi generate model -f test_crontab_CRD.yaml -t ~/ --skip-validation --crd -``` - -- 输出文件: ~/models/stable_example_com_v1_cron_tab.k - -```python -""" -This file was generated by the KCL auto-gen tool. DO NOT EDIT. -Editing this file might prove futile when you re-run the KCL auto-gen generate command. -""" -import kusion_kubernetes.apimachinery.apis - - -schema CronTab: - """stable example com v1 cron tab - """ - - apiVersion: "stable.example.com/v1" = "stable.example.com/v1" - """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources - """ - - kind: "CronTab" = "CronTab" - """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - """ - - metadata?: apis.ObjectMeta - """ Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - """ - - spec?: StableExampleComV1CronTabSpec - """spec - """ - - -schema StableExampleComV1CronTabSpec: - """stable example com v1 cron tab spec - """ - - - cronSpec?: str - """cron spec - """ - - image?: str - """image - """ - - replicas?: int - """replicas - """ - -``` diff --git a/docs/reference/cli/openapi/index.md b/docs/reference/cli/openapi/index.md deleted file mode 100644 index f80da564..00000000 --- a/docs/reference/cli/openapi/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# OpenAPI Tools - -Kusion adds support for the KCL configuration policy language to the OpenAPI specification, which can not only convert common models, but also support CRD and other models. diff --git a/docs/reference/cli/openapi/openapi-to-kcl.md b/docs/reference/cli/openapi/openapi-to-kcl.md deleted file mode 100644 index 071bc051..00000000 --- a/docs/reference/cli/openapi/openapi-to-kcl.md +++ /dev/null @@ -1,60 +0,0 @@ -# OpenAPI to KCL - -命令 - -```shell -kcl-openapi generate model -f ${your_open_api_spec.yaml} -t ${the_kcl_files_output_dir} -``` - -示例: - -- 输入文件:test_open_api_spec.yaml: - -```yaml -definitions: - v1.TestInt: - type: object - properties: - name: - type: string - format: int-or-string - required: - - name - x-kcl-type: - import: - package: v1.test_int - alias: test_int - type: TestInt -swagger: "2.0" -info: - title: Kusion - version: v0.0.2 -paths: {} -``` - -- 命令: - -```shell -kcl-openapi generate model -f test_open_api_spec.yaml -t ~/ -``` - -- 输出:~/models/v1/test_int.k - -```python -""" -This is the test_int module in v1 package. -amytestThis file was generated by the KCL auto-gen tool. DO NOT EDIT. -Editing this file might prove futile when you re-run the KCL auto-gen generate command. 
-""" - - -schema TestInt: - """v1 test int - """ - - - name: int | str - """name - """ - -``` diff --git a/docs/reference/cli/openapi/quick-start.md b/docs/reference/cli/openapi/quick-start.md deleted file mode 100644 index a973d011..00000000 --- a/docs/reference/cli/openapi/quick-start.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Quick Start - -## 1. 安装 KCLOpenAPI tool - -目前有两种安装方式可选: - -- [通过 kusionctl 工具集一键安装(推荐)](#joYJh) -- [单独安装 KCLOpenAPI 工具](#BjyR3) - -## 1.1 通过 kusionctl 工具集一键安装 - -- 安装:推荐直接安装 kusionctl 工具集,它内置了 kusionCtl,KCLVM,KCLOpenAPI 等多种工具。关于 kusionctl 安装,请查看 [kusion 快速上手文档](/docs/user_docs/getting-started/install)。 -- 验证安装结果,执行 `kcl-openapi generate model -h`,看到如下信息说明安装成功: - -```shell -kcl-openapi command helps you to generate KCL schema structure from K8s CRD YAML/JSON file. - 1. Translate Swagger Openapi Spec to KCL code - 2. Translate Kubernetes CRD to KCL code - -Examples: - - # convert a K8s CRD file into KCL files - kcl-openapi generate model -f FILENAME --crd --skip-validation - -Options: - --crd=false: Set the spec file is a kube crd - -f, --filename='': The filename to convert - --skip-validation=false: Skips validation of spec prior to generation - -t, --target='': The location to write output kcl files - --version=false: Show the KCLOpenAPI version - -Usage: - kcl-openapi generate model -f FILENAME [options] -``` - -## 1.2 单独安装 KCLOpenAPI 工具: - -- 安装:您也可以单独安装 KCLOpenapi: - -```shell -# 1. 下载二进制程序 -# https://github.com/KusionStack/kcl-openapi/releases - -# 2. 将命令添加至PATH -export PATH=":$PATH" -``` - -- 验证安装结果,执行 `kcl-openapi -h`,看到如下信息说明安装成功: - -```shell -Usage: - kcl-openapi [OPTIONS] - -Swagger tries to support you as best as possible when building APIs. - -It aims to represent the contract of your API with a language agnostic description of your application in json or yaml. - - -Application Options: - -q, --quiet silence logs - --log-output=LOG-FILE redirect logs to file - -Help Options: - -h, --help Show this help message - -Available commands: - generate generate kcl code - validate validate the swagger document -``` - -# 2. 
生成 KCL 文件 - -- [OpenAPI to KCL](../openapi/openapi-to-kcl.md) -- [CRD to KCL](../openapi/crd-to-kcl.md) diff --git a/docs/reference/cli/openapi/spec.md b/docs/reference/cli/openapi/spec.md deleted file mode 100644 index 1a2a2eb0..00000000 --- a/docs/reference/cli/openapi/spec.md +++ /dev/null @@ -1,425 +0,0 @@ -# KCL OpenAPI Spec - -[OpenAPI](https://www.openapis.org/) 允许 API 提供方规范地描述 API 操作和模型,并基于它生成自动化工具和特定语言的客户端。 - -## KCL OpenAPI 文件结构 - -依据 OpenAPI 3.0 规范,OpenAPI 文件中应至少包含 openapi、components、 info、paths 四种根节点对象,KCL OpenAPI 聚焦于其中模型定义的部分,即 OpenAPI 文件中的 `definitions`,而描述操作的 Restful API 部分(即 OpenAPI 文件中的 `paths`)则不属于 KCL OpenAPI 定义的范畴。 -​ - -注:除以上列出的节点外,OpenAPI 官方规范还支持 servers、security、tags、externalDocs 四种可选的根节点,但都不是 KCL OpenAPI 所关心的,因此用户无需填写这部分内容,即使填写了也不会产生任何影响。 -​ - -| OpenAPI 顶层对象 | 类型 | 含义 | KCL OpenAPI 工具支持情况 | -| ---------------- | ----------------- | --------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| swagger | string | openapi 版本信息 | 必填项,目前支持 openapi 2.0,即合法取值为 "2.0" | -| definitions | Definition Object | 模型定义 | 必填项 | -| info | Info Object | 当前 API 文件的元数据信息,例如标题、描述信息、版本、开源协议等 | 必填项,定义当前 OpenAPI 文件的基本信息,不会输出到 KCL 代码,但可用于 Swagger-UI 工具可视化展示 | - -为方便初学者快速理解,下面给出一个典型的 KCL OpenAPI 文件(截取自 swagger example [Petstore](https://petstore.swagger.io/))应包含的节点图示。KCL OpenAPI 工具重点关注其中的 definitions 节点,可以看到文件中定义了两个模型(Pet 和 Category),并且 Pet 模型中包含三个属性(name、id、category) - -## KCL schema - -KCL 中使用 schema 结构来定义配置数据的“类型”,关于 KCL schema,可参考文档:传送门 -在 definitions 节点下新增 definition 元素,即可定义 KCL schema. -示例: -下例在 KCL 代码中定义了 Pet、Category 两个 schema,同样地,其对应的 OpenAPI 也在 definitions 节点下包含这两个模型的描述。 - -```python -# KCL schema: -schema Pet: - name: str - id?: int - category?: Category - -schema Category: - name?: str - -# 对应的 OpenAPI 描述 -{ - "definitions": { - "Pet": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "id": { - "type": "integer", - "format": "int64" - }, - "category": { - "$ref": "#/definitions/Category" - } - }, - "required": [ - "name" - ] - }, - "Category": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} - -``` - -### schema 名称 - -在 KCL 中,schema 名称紧跟在 schema 关键字后声明,在 OpenAPI 中,模型的名称通过 definition 元素的 key 来定义。 - -### schema 类型 - -KCL schema 在 OpenAPI 中的类型为 "object". 例如上例中 "Pet" 的 "type" 值应为 "object". - -### schema 属性 - -KCL schema 中可以定义若干属性,属性的声明一般包含如下几部分: - -- 属性注解:可选,以 @ 开头,例如 @deprecated 注解表示属性被废弃 -- 属性访问修饰符(final):可选,声明当前属性的值不可被修改 -- 属性名称:必须 -- 属性 optional 修饰符(?):可选,带问号表示当前属性为可选属性,可以不被赋值。反之,不带问号表示必填属性 -- 属性类型:必须,可以是基本数据类型,也可以是 schema 类型, 或者是前述两种类型的并集 -- 属性默认值:非必须 - -它们与 OpenAPI 规范的对应关系如下: - -| KCL schema 属性元素 | OpenAPI 元素 | -| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 属性注解 | 暂不支持,计划扩展一个 deprecate 字段用于描述 deprecated 注解 | | -| 属性名称 | properties 节点下,每个属性的 key 即为属性名称 | -| 属性 optional 修饰符(?) 
| 模型节点下,通过 required 字段列出该模型的所有必填属性的名称,未被列出的属性即为 optional | -| 属性类型 | 属性节点下,设置 type + format 可以标识属性的基本类型,如果是 schema 类型则用 $ref 字段表示,类型 union 则由扩展字段 x-kcl-types 来标识,此外,属性节点的 enum、pattern 也可以用于表示 KCL 类型。 | -| KCL-OpenAPI 关于类型的对照关系,详见“基本数据类型”小节 | | -| 属性默认值 | 属性节点下,设置 default 字段即可为属性设置默认值 | - -示例: -下例中 Pet 模型包含了 2 个属性:name(string 类型,必填属性,无注解,无默认值)、id(int64 类型,无注解,非必填,默认值为 -1) - -```python -# KCL schema Pet,包含两个属性 name 和 id -schema Pet: - name: str - id?: int = -1 - -# 对应的 OpenAPI 文档 -{ - "definitions": { - "Pet": { - "type": "object", - "properties": { - "name": { - "type": "string", - }, - "id": { - "type": "integer", - "format": "int64", - "default": -1 - } - }, - "required": [ - "name" - ], - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} -``` - -### schema 索引签名 - -KCL schema 允许定义索引签名,用于定义属性名不固定的 dict,起到静态模板的作用。具体来说,KCL schema 索引签名包含如下几个元素: - -- 索引签名中 key 的类型:在方括号中声明,必须是基础类型 -- 索引签名中 value 的类型:在冒号后声明,可以是任意合法的 KCL 类型 -- 索引签名中的省略符:在方括号中,key 类型之前声明,使用"..."表示。如果带有该符号,表示该索引签名只用于约束未在 schema 中定义的属性;否则,表示 schema 中所有已定义和未定义属性都收到该索引签名的约束。 -- 索引签名中 key 的别名:在方括号中,紧随左方括号之后声明,使用名称 + 冒号表示,该别名可用于按名称引用索引签名 -- 索引签名的默认值:可以为索引签名设置默认值 - -在 OpenAPI 中,可以借助在模型节点的 `additionalProperties` 字段描述某些 key 为 string 的索引签名。但对于 KCL 索引签名中非 string 类型的 dict key、索引签名 key 的 check 校验,在 OpenAPI 规范没有对等的描述。它们与 OpenAPI 规范的对应关系如下: - -| KCL 索引签名元素 | OpenAPI 元素 | -| ----------------------- | ---------------------------------------------------------------------- | -| 索引签名中 key 的类型 | OpenAPI 仅支持 key 为 string 类型,无法自定义 | -| 索引签名中 value 的类型 | 模型节点的下 additionalProperties 下的 "type" 字段 | -| 索引签名中的省略符 | OpenAPI 中表示索引签名时,只能表示 KCL 中带有省略符的情况 | -| 索引签名中 key 的别名 | OpenAPI 中不支持为索引签名定义 key 别名,(预计通过扩展支持:x-alias) | -| 索引签名的默认值 | 目前不支持 | - -示例:下例中的 KCL schema Pet,包含两个预定义的属性 name 和 id,除此之外,还允许使用该 schema 的配置额外地赋值其他 key 为 string 类型,value 为 bool 类型的属性: - -```python -# KCL schema Pet,包含两个预定义的属性 name 和 id,允许额外给 key 为 string、value 为 bool 的属性赋值 -schema Pet: - name: str - id?: int - [...str]: bool - -# 对应的 OpenAPI 描述 -{ - "definitions": { - "Pet": { - "type": "object", - "properties": { - "name": { - "type": "string", - }, - "id": { - "type": "integer", - "format": "int64", - } - }, - "additionalProperties": { - "type": "bool" - }, - "required": [ - "name" - ], - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} -``` - -### schema 继承关系 - -### 内联 schema - -OpenAPI 支持嵌套地定义 schema,但 KCL 目前暂不支持 schema 的内联。OpenAPI 中内联定义的 schema 将被转换为 KCL 中带名称的 schema,其名称的命名规则为:在该内联 schema 的上层 schema 名称的基础上,增加相应的后缀。在拼接后缀时,根据定义了该内联 schema 的外层 OpenAPI 元素类型,后缀内容如下: - -| OpenAPI 文档中定义内联 schema 的元素 | KCL schema 名称拼接规则 | -| ------------------------------------ | ------------------------------ | -| 某属性节点 | 增加该属性节点的名称为后缀 | -| AdditionalProperties 节点 | 增加"AdditionalProperties"后缀 | - -注:KCL 未来也可能会支持内联 schema,届时再更新这部分转换规则 -示例 1:下例中的模型 Deployment 包含有 kind、spec 两个属性,其中 deploymentSpec 属性的 schema 通过内联的方式定义: - -```python -# OpenAPI 文档 -{ - "definitions": { - "Deployment": { - "type": "object", - "properties": { - "kind": { - "type": "string", - }, - "spec": { - "type": "object", - "properties": { - "replicas": { - "type": "integer", - "format": "int64" - } - } - } - }, - "required": [ - "kind", - "spec" - ], - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} - -# 转换为 KCL Schema 如下: -schema Deployment: - kind: str - spec: DeploymentSpec - -schema DeploymentSpec: - replicas?: int -``` - -示例 2:下例中的模型 Person 中除固定属性 name 外,还允许包含额外的属性(additionalProperties),并且这部分额外属性的属性值的 schema 通过内联的方式定义: - -```python -# 
OpenAPI 文档 -{ - "definitions": { - "Person": { - "type": "object", - "properties": { - "name": { - "type": "string", - }, - }, - "required": [ - "name", - "spec" - ], - "additionalProperties": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - } - }, - "required": [ - "name" - ] - }, - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} - -# 转换为 KCL Schema 如下: -schema Person: - name: str - [...str]: [PersonAdditionalProperties] - -schema PersonAdditionalProperties: - name: str - description?: str -``` - -## KCL 文档 - -KCL doc 规范请参考:[传送门](../kcl/docgen.md) -KCL 文档包含 module 文档、schema 文档两类,其中 schema 文档可以由 OpenAPI 转换得到。KCL schema 文档包含: - -- schema 描述信息:位于 schema 声明之后、schema 属性声明之前,是对 schema 的总体介绍 -- schema 属性信息:位于 shcema 描述信息之后,以 Attributes + 分割线分隔 -- schema 附加信息:位于 schema 属性信息之后,以 See Also + 分割线分隔 -- schema 示例信息:位于 schema 附加信息之后,以 Examples + 分割线分隔 - -它们与 OpenAPI 规范的对应关系如下: - -| KCL 文档元素 | OpenAPI 元素 | -| --------------- | ---------------------------------------------------- | -| schema 描述信息 | definitions 节点下,每个模型节点的 description 字段 | -| schema 属性信息 | properties 节点下,每个属性节点的 description 字段 | -| schema 附加信息 | definitions 节点下,每个模型节点的 example 字段 | -| schema 示例信息 | definitions 节点下,每个模型节点的 externalDocs 字段 | - -示例: -下例中为 Pet 模型定义了其 schema 描述文档 "The schema Pet definition";Pet 的两个属性 "name" 和 "id" 也分别定义了其属性文档 "The name of the pet" 及 "The id of the pet";Pet 的附加信息为 "Find more info here. [https://petstore.swagger.io/](https://petstore.swagger.io/)";此外,Pet 模型还提供了模型实例的示例写法。 - -```python -# KCL schema Pet,采用规范的 KCL 文档格式 -schema Pet: - """The schema Pet definition - - Attributes - ---------- - name : str, default is Undefined, required - The name of the pet - id : int, default is -1, optional - The age of the pet - - See Also - -------- - Find more info here. 
https://petstore.swagger.io/ - - Examples - -------- - pet = Pet { - name = "doggie" - id = 123 - } - """ - name: str - id?: int = -1 - -# 对应的 OpenAPI 文档 -{ - "definitions": { - "Pet": { - "description": "The schema Pet definition", - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name of the pet" - }, - "id": { - "type": "integer", - "format": "int64", - "default": -1, - "description": "The age of the pet" - } - }, - "required": [ - "name" - ], - "externalDocs": { - "description": "Find more info here", - "url": "https://petstore.swagger.io/" - }, - "example": { - "name": "doggie", - "id": 123 - } - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} -``` - -​ - -## 基本数据类型 - -| JSON Schema type | swagger type | KCL type | comment | -| ---------------- | --------------------------- | --------------- | ----------------------------------------------------------------------------------------------------- | -| boolean | boolean | bool | | -| number | number | float | | -| | number format double | **unsupported** | | -| | number format float | float | | -| integer | integer | int (32) | | -| | integer format int64 | **unsupported** | | -| | integer format int32 | int (32) | | -| string | string | str | | -| | string format byte | str | | -| | string format int-or-string | int | str | -| | string format binay | str | | -| | string format date | unsupported | As defined by full-date - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) | -| | string format date-time | unsupported | As defined by date-time - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) | -| | string format password | unsupported | for swagger: A hint to UIs to obscure input. 
| -| | datetime | datetime | | - -# Reference - -- openapi spec 2.0:[https://swagger.io/specification/v2/](https://swagger.io/specification/v2/) -- openapi spec 3.0:[https://spec.openapis.org/oas/v3.1.0](https://spec.openapis.org/oas/v3.1.0) -- openapi spec 3.0(swagger 版本):[https://swagger.io/specification/](https://swagger.io/specification/) -- openapi spec 2.0 #SchemaObject:[https://swagger.io/specification/v2/#schemaObject](https://swagger.io/specification/v2/#schemaObject) -- go swagger:[https://goswagger.io/use/models/schemas.html](https://goswagger.io/use/models/schemas.html) -- swagger data models:[https://swagger.io/docs/specification/data-models/](https://swagger.io/docs/specification/data-models/) diff --git a/docs/reference/konfig/1-overview.md b/docs/reference/konfig/1-overview.md deleted file mode 100644 index eac0d968..00000000 --- a/docs/reference/konfig/1-overview.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -id: overview -sidebar_label: Overview ---- -# Konfig Overview - -KusionStack 推荐通过 **配置大库** 的方式统一管理所有的配置清单和模型库,即不仅存放抽象模型本身的 KCL 定义,还存放各种类型的配置清单,比如应用的运维配置、策略配置等。配置大库推荐托管在各类 VCS 系统中,以方便做配置的回滚和漂移检查。官方的配置大库的最佳实践代号为 Konfig,仓库托管在 [Github](https://github.com/KusionStack/konfig)。 - -⚡️ 配置大库 主要包括: - -* Kusion 模型库 -* 各类配置清单目录:应用运维配置(appops)、建站配置(siteops)等 -* 大库声明文件(kcl.mod) -* 大库测试脚本(Makefile 等) - -之所以用一个大的仓库管理全部的 IaC 配置代码,是由于不同代码包的研发主体不同,会引发出包管理和版本管理的问题,从而导致平台侧需要支持类似编译平台的能力。采用大库模式,业务配置代码、基础配置代码在一个大库中,因此代码间的版本依赖管理比较简单,平台侧处理也比较简单,定位唯一代码库的目录及文件即可,代码互通,统一管理,便于查找、修改、维护(大库模式也是 Google 等头部互联网公司内部实践的模式)。 - -下面是配置大库(Konfig)的架构图: - -![](/img/docs/reference/konfig/konfig-arch-01.png) - -核心模型内部通过前端模型和后端模型两层抽象简化前端用户的配置代码,底层模型则是通过 [KCL OpenAPI](/docs/reference/cli/openapi) 工具自动生成。 - -:::tip -模型的更详细文档可参考 [参考手册/Kusion 模型库](/docs/reference/model/overview)。 -::: diff --git a/docs/reference/konfig/2-structure.md b/docs/reference/konfig/2-structure.md deleted file mode 100644 index 5fba9058..00000000 --- a/docs/reference/konfig/2-structure.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -id: structure -sidebar_label: dir-struct ---- -# Konfig Dir Struct - -本文主要解释 Konfig 配置大库的目录和代码结构,其中涉及的基本概念解释可见[《Kusion 模型库概览》](/docs/reference/model/overview)。 - -## 1. 整体结构 - -```bash -. -├── Makefile # 通过 Makefile 封装常用命令 -├── README.md # 配置大库说明 -├── appops # 应用运维目录,用来放置所有应用的 KCL 运维配置 -│ ├── guestbook-frontend -│ ├── http-echo -│ └── nginx-example -├── base # Kusion Model 模型库 -│ ├── examples # Kusion Model 样例代码 -│ │ ├── monitoring # 监控配置样例 -│ │ ├── native # Kubernetes 资源配置样例 -│ │ ├── provider # 基础资源配置样例 -│ │ └── server # 云原生应用运维配置模型样例 -│ └── pkg -│ ├── kusion_kubernetes # Kubernetes 底层模型库 -│ ├── kusion_models # 核心模型库 -│ ├── kusion_prometheus # Prometheus 底层模型库 -│ └── kusion_provider # 基础资源 底层模型库 -├── hack # 放置一些脚本 -└── kcl.mod # 大库配置文件,通常用来标识大库根目录位置以及大库所需依赖 -``` - -## 2. 核心模型库结构 - -核心模型库一般命名为 kusion_models,主要包含前端模型、后端模型、Mixin、渲染器等,目录结构为: - -```bash -├── commons # 基础资源核心模型库 -├── kube # 云原生资源核心模型库 -│ ├── backend # 后端模型 -│ ├── frontend # 前端模型 -│ │ ├── common # 通用前端模型 -│ │ ├── configmap # ConfigMap 前端模型 -│ │ ├── container # 容器前端模型 -│ │ ├── ingress # Ingress 前端模型 -│ │ ├── resource # 资源规格前端模型 -│ │ ├── secret # Secret 前端模型 -│ │ ├── service # Service 前端模型 -│ │ ├── sidecar # Sidecar 容器前端模型 -│ │ ├── strategy # 策略前端模型 -│ │ ├── volume # Volume 前端模型 -│ │ └── server.k # 云原生应用运维前端模型 -│ ├── metadata # 应用运维的元数据模型 -│ ├── mixins # 统一放置可复用的 Mixin -│ ├── render # 渲染器,把前后端模型联系在一起的桥梁 -│ ├── templates # 静态配置 -│ └── utils # 工具方法 -└── metadata # 通用元数据模型 -``` - -## 3. 
Project 和 Stack 结构 - -Project 和 Stack 的基本概念可见 [《Project & Stack》](/user_docs/concepts/konfig.md)。 - -Project 在配置大库的应用运维(appops)场景中对应的概念是「应用」,Stack 对应的概念是「环境」,更多映射关系可见[《映射关系》](/user_docs/guides/organizing-projects-stacks/mapping.md)。 - -本节以应用「nginx-example」为例,介绍 Project 和 Stack 在配置大库中的基本目录结构: - -```bash -├── README.md # Project 介绍文件 -├── base # 各环境通用配置 -│ └── base.k # 通用 KCL 配置 -├── dev # 环境特有配置 -│ ├── ci-test # 测试目录 -│ │ ├── settings.yaml # 测试数据 -│ │ └── stdout.golden.yaml # 测试期望结果 -│ ├── kcl.yaml # 多文件编译配置,是 KCL 编译的入口 -│ ├── main.k # 当前环境 KCL 配置 -│ └── stack.yaml # Stack 配置文件 -└── project.yaml # Project 配置文件 -``` diff --git a/docs/reference/konfig/3-guide.md b/docs/reference/konfig/3-guide.md deleted file mode 100644 index 3ecf9c84..00000000 --- a/docs/reference/konfig/3-guide.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -id: guide -sidebar_label: Use Guide ---- -# Use Guide - -## 1. 添加应用 - -在 [快速开始/Usecase](/docs/user_docs/getting-started/usecase) 我们已经展示如何快速添加一个应用(参考 [Project & Stack](/docs/user_docs/concepts/konfig))。 - -## 2. 验证 Konfig 代码 - -### 2.1 快速开始 - -在安装完成 Kusion 工具之后,在 Konfig 根目录执行 `make check-all` 验证大库全部 Project(参考 [Konfig](/docs/user_docs/concepts/konfig)),或者执行 `make check WHAT="http-echo"` 验证 `appops/http-echo` 应用。 - -如果需要单独验证 `appops/http-echo` 应用的 dev 版本,可以进入 `appops/http-echo/dev` 目录执行 `kusion compile` 命令(或者通过更底层的 `kcl -Y kcl.yaml ci-test/settings.yaml -o ci-test/stdout.golden.yaml` 命令),输出的文件在 `appops/http-echo/dev/ci-test/stdout.golden.yaml`。 - -:::tip -更多大库预置命令可以在大库根目录执行 make 命令进行查看: - -```bash -$ make -help 这里是帮助文档 :) -check-all 校验所有 Project -check 校验指定目录下的 Project,比如 make check WHAT=nginx-example 或者 make check WHAT="http-echo nginx-example" -clean-all 清理缓存 -install-hooks 安装 git hooks,目前主要有 pre-commit hook(提交时自动编译) -uninstall-hooks 卸载 git hooks -``` - -::: - -### 2.2 使用样例 - -🎯 根据目录名编译指定应用,比如编译应用 http-echo - -```bash -make check WHAT=http-echo -# OR: make check-http-echo -``` - -
- 执行结果 - -```bash -Matched path: ['/Users/kusion-user/workspace/Konfig/appops/http-echo'] -Matched path total: 1 - -/Users/kusion-user/workspace/Konfig/appops/http-echo [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/http-echo/dev [Success] - -All Success! -Total time: 2.06s, Total app num: 1, Total env num: 1, Time per env: 2.06s -``` - -
- -🎯 编译多个应用 - -```bash -make check WHAT="http-echo nginx-example" -``` - -
- 执行结果 - -```bash -Matched path: ['/Users/kusion-user/workspace/Konfig/appops/http-echo'] -Matched path total: 1 - -Matched path: ['/Users/kusion-user/workspace/Konfig/appops/nginx-example'] -Matched path total: 1 - -/Users/kusion-user/workspace/Konfig/appops/http-echo [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/http-echo/dev [Success] -/Users/kusion-user/workspace/Konfig/appops/nginx-example [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/nginx-example/dev [Success] - -All Success! -Total time: 2.11s, Total app num: 2, Total env num: 2, Time per env: 1.06s -``` - -
- -🎯 关键字除了是应用名,也可以是任意目录名称,比如编译 appops 应用运维目录下的所有应用 - -```bash -make check-appops -# OR: make check WHAT=appops -``` - -
- 执行结果 - -```bash -Matched path: ['/Users/kusion-user/workspace/Konfig/appops'] -Matched path total: 1 - -/Users/kusion-user/workspace/Konfig/appops/nginx-example [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/nginx-example/dev [Success] -/Users/kusion-user/workspace/Konfig/appops/guestbook-frontend [ALL DONE] - ┣━ /Users/kusion-user/workspace/Konfig/appops/guestbook-frontend/prod [Success] - ┣━ /Users/kusion-user/workspace/Konfig/appops/guestbook-frontend/test [Success] - ┗━ /Users/kusion-user/workspace/Konfig/appops/guestbook-frontend/pre [Success] -/Users/kusion-user/workspace/Konfig/appops/http-echo [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/http-echo/dev [Success] - -All Success! -Total time: 4.08s, Total app num: 3, Total env num: 5, Time per env: 0.82s -``` - -
- -🎯 编译所有应用 - -```bash -make check-all -``` - -
- 执行结果 - -```bash -Matched path total: 139 - -/Users/kusion-user/workspace/Konfig/base/examples/server/app_need_namespace [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_need_namespace/prod [Success] -/Users/kusion-user/workspace/Konfig/appops/guestbook-frontend [ALL DONE] - ┣━ /Users/kusion-user/workspace/Konfig/appops/guestbook-frontend/prod [Success] - ┣━ /Users/kusion-user/workspace/Konfig/appops/guestbook-frontend/pre [Success] - ┗━ /Users/kusion-user/workspace/Konfig/appops/guestbook-frontend/test [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_secret [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_secret/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_volume [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_volume/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_config_map [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_config_map/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_label_selector [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_label_selector/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_main_container [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_main_container/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_sidecar [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_sidecar/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_stateful_set [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_stateful_set/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_service [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_service/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/server/app_scheduling_strategy [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/server/app_scheduling_strategy/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/kcl-vault-agent [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/kcl-vault-agent/dev [Success] -/Users/kusion-user/workspace/Konfig/base/examples/monitoring/prometheus-example-app [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/monitoring/prometheus-example-app/prod [Success] -/Users/kusion-user/workspace/Konfig/base/examples/kcl-vault-csi [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/base/examples/kcl-vault-csi/dev [Success] -/Users/kusion-user/workspace/Konfig/appops/nginx-example [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/nginx-example/dev [Success] -/Users/kusion-user/workspace/Konfig/appops/http-echo [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/http-echo/dev [Success] - -All Success! -Total time: 17.21s, Total app num: 16, Total env num: 18, Time per env: 0.96s -``` - -
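
Section 2.1 already gives the underlying compile command; a minimal sketch of verifying a single stack against its stored result, assuming the `ci-test` layout shown above and writing to a temporary file of your choice instead of overwriting the golden file, could be:

```bash
# Compile the dev stack of http-echo to a temporary file,
# then compare it with the committed golden output.
cd appops/http-echo/dev
kcl -Y kcl.yaml ci-test/settings.yaml -o /tmp/http-echo-dev.yaml
diff /tmp/http-echo-dev.yaml ci-test/stdout.golden.yaml && echo "output matches the golden file"
```
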
- -## 3. 自动编译 - -借助 git hooks 功能和 pre-commit 脚本,实现提交代码(git commit)到大库时自动编译当前变更涉及的应用/项目; - -:::note -自动编译在 terminal 中展示效果较好,在 IDE 进行 commit 提交的展示效果不佳; -::: - -### 3.1 快速开始 - -移动到大库根目录 - -``` -cd ~/Konfig -``` - -安装 - -``` -make install-hooks -``` - -卸载 - -``` -make uninstall-hooks -``` - -:::note -以上演示命令均在大库根目录中执行; -::: - -### 3.2 使用样例 - -git hooks 安装成功后,在提交时会自动编译: - -
- 执行结果 - -```bash -➜ Konfig (master) ✔ make install-hooks -Successfully install pre-commit hooks! -➜ Konfig (master) ✔ git status -On branch master -Your branch is up to date with 'origin/master'. - -Changes not staged for commit: - (use "git add ..." to update what will be committed) - (use "git restore ..." to discard changes in working directory) - modified: appops/http-echo/base/base.k - -no changes added to commit (use "git add" and/or "git commit -a") - -➜ Konfig (master) ✔ git add . -➜ Konfig (master) ✔ git commit -m 'test' -------------- 开始执行提交前置检查 🚀 ------------- -🕒 开始自动执行预编译... -Running kclvm /Users/kusion-user/workspace/Konfig/hack/compile-rocket.py appops/http-echo ... -Matched path: ['/Users/kusion-user/workspace/Konfig/appops/http-echo'] -Matched path total: 1 - -/Users/kusion-user/workspace/Konfig/appops/http-echo [ALL DONE] - ┗━ /Users/kusion-user/workspace/Konfig/appops/http-echo/dev [Success] - -All Success! -Total time: 2.04s, Total app num: 1, Total env num: 1, Time per env: 2.04s -🕒 正在将编译结果加入到暂存区(stage),作为本次提交内容... -💡 预编译执行结束 - -------------- 执行结果 ------------- -预编译: 成功 - -------------- 建议 ------------- -预编译: 无 - -------------- 前置检查完成,已提交 ✅ ------------- - -[master c006e80] test - 2 files changed, 2 insertions(+), 2 deletions(-) - -➜ Konfig (master) ✔ git status -On branch master -Your branch is ahead of 'origin/master' by 1 commit. - (use "git push" to publish your local commits) - -nothing to commit, working tree clean -``` - -
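
From the log above, the hook itself just runs `hack/compile-rocket.py` on the changed application directory through the kclvm interpreter. Assuming the same repository layout, roughly the same check can be triggered by hand from the Konfig root (a sketch, not an official entry point):

```bash
# Recompile a single application the same way the pre-commit hook does,
# using the script path and interpreter shown in the hook output above.
kclvm hack/compile-rocket.py appops/http-echo
```
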
- -### 3.3 临时绕过 - -如果想在本次提交时临时绕过「自动编译」,可以这样做: - -```bash -git commit -n -# OR: git commit --no-verify -``` - -### 3.4 编译结果不自动加入提交 - -从样例中可以看到,编译生成的编译结果会自动加入到本次提交中,如果不想自动加入,可以设置环境变量 ADD_TO_STAGE_AFTER_COMPILE=False,关闭此功能: -export ADD_TO_STAGE_AFTER_COMPILE=False - -:::note -通过 export 设置环境变量只在当前 Terminal 有效 -::: diff --git a/docs/reference/konfig/4-vars.md b/docs/reference/konfig/4-vars.md deleted file mode 100644 index 3bf8d713..00000000 --- a/docs/reference/konfig/4-vars.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -id: vars -sidebar_label: Magic Vars ---- -# Magic Vars - -## 1. 概念 - -魔术变量:预置的、代表基础元数据的变量 - -## 2. 魔术变量列表 - -| 名称 | 作用 | 取值样例 | 备注 | -| -------------------- | -------- | -------- | -------------------------------- | -| __META_APP_NAME | 应用名称 | testapp | 等价于 project.yaml 中 name 的值 | -| __META_ENV_TYPE_NAME | 环境名称 | test | 等价于 stack.yaml 中 name 的值 | -| __META_CLUSTER_NAME | 集群名称 | minikube | 可通过 -D cluster 指定该值 | - -## 3. 环境类型(env)推荐取值 - -``` -dev,test,stable,pre,gray,prod -``` - -## 4. 环境类别(envCategory)推荐取值 - -| 名称 | 取值 | 包含环境 | -| -------- | ------- | --------------- | -| 线上环境 | online | pre,gray,prod | -| 线下环境 | offline | dev,test,stable | diff --git a/docs/reference/konfig/_category_.json b/docs/reference/konfig/_category_.json deleted file mode 100644 index 7b3ba634..00000000 --- a/docs/reference/konfig/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Konfig", - "position": 4 -} diff --git a/docs/reference/lang/_category_.json b/docs/reference/lang/_category_.json deleted file mode 100644 index 8c7bfcf8..00000000 --- a/docs/reference/lang/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "KCL", - "position": 2 -} diff --git a/docs/reference/lang/index.md b/docs/reference/lang/index.md deleted file mode 100644 index ab9d105e..00000000 --- a/docs/reference/lang/index.md +++ /dev/null @@ -1 +0,0 @@ -# KCL Language diff --git a/docs/reference/lang/lang/_category_.json b/docs/reference/lang/lang/_category_.json deleted file mode 100644 index 6066c82b..00000000 --- a/docs/reference/lang/lang/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "KCL", - "position": 1 -} diff --git a/docs/reference/lang/lang/_kcl_mod.md b/docs/reference/lang/lang/_kcl_mod.md deleted file mode 100644 index 9fd33601..00000000 --- a/docs/reference/lang/lang/_kcl_mod.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -sidebar_position: 6 ---- - -# kcl.mod - -当配置参数变得复杂时,我们可以通过拆分文件和目录的方式重新组装 KCL 代码,不同文件中的 KCL 代码对应包或模块,它们可以通过 import 语句被导入使用。模块和包可以通过相对模块路径导入,也可以通过绝对模块路径导入。而模块的绝对路径是通过 kcl.mod 文件指定的。 - -## 1. 定位模块根目录 - -比如有以下结构: - -``` -. -|── kcl.mod -├── mod1.k -├── mod2.k -├── pkg1 -│   ├── def1.k -│   ├── def2.k -│   └── def3init.k -└── pkg2 - ├── file2.k - └── subpkg3 - └── file3.k -``` - -- kcl.mod 文件所在的目录对应模块的根目录 -- `mod1.k` 对应导入方式 `import mod1` -- `mod2.k` 对应导入方式 `import mod2` -- `pkg1/*.k` 对应导入方式 `import pkg1` -- `pkg2/*.k` 对应导入方式 `import pkg2` -- `pkg1/subpkg3/*.k` 对应导入方式 `import pkg1.subpkg3` - -> **Note:** 对于同目录下的 KCL 文件,不要混用目录和文件的导入方式(比如 `import pkg1` 和 `import pkg1.def1` 就是混用的例子)。 - -## 2. kcl.mod 文件的结构 - -最简单的 kcl.mod 是一个空文件,只是用于定位模块的绝对路径。不过 kcl.mod 其实是一种 [TOML](https://github.com/toml-lang/toml) 格式的文件,其中可以包含一些配置信息。 - -比如以下的 kcl.mod 文件: - -```toml -[build] -enable_pkg_cache=true -cached_pkg_prefix="base.pkg." 
- -[expected] -kclvm_version="v0.3.9" -kcl_plugin_version="v0.2.14" -``` - -`build` 段打开了缓存,并定义了要换成的包路径前缀。`expected` 段定义了期望的 KCLVM 版本和插件版本。 - -完整的 kcl.mod 对应以下的 Protobuf 结构: - -```protobuf -syntax = "proto3"; - -package kclvm.modfile; - -// kcl.mod 文件对应的内存格式 -// kcl.mod 文件为TOML格式, 字段名字和类型保持一致 -message KclModFile { - string root = 1; // 根目录路径, 由程序填充 - string root_pkg = 2; // 根包import路径, 对应所有子包的前缀, 可以忽略 - - KclModFile_build_section build = 3; // build 配置 - KclModFile_expected_section expected = 4; // expected 配置 -} - -message KclModFile_build_section { - bool enable_pkg_cache = 1; // 启动pkg缓存 - string cached_pkg_prefix = 2; // 缓存的前缀路径 - string target = 3; // 编译的目标,可选 native, wasm -} - -message KclModFile_expected_section { - string min_build_time = 1; // 期望构建时间下界 2021-08-14 20:30:08 - string max_build_time = 2; // 期望构建时间上界 2021-08-16 20:30:08 - string kclvm_version = 3; // KCLVM 版本依赖 - string kcl_plugin_version = 4; // KCLVM Plugin 版本依赖 - string global_version = 5; // 全局版本 -} -``` - -kcl.mod 文件对应 KclModFile 结构,其中包含模块路径和本地路径的映射关系(目前还没有使用)。上面例子中的 build 和 expected 分别对应 KclModFile_build_section 和 KclModFile_expected_section 结构。 - diff --git a/docs/reference/lang/lang/codelab/_category_.json b/docs/reference/lang/lang/codelab/_category_.json deleted file mode 100644 index 2e047bcb..00000000 --- a/docs/reference/lang/lang/codelab/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Code Lab", - "position": 2 -} diff --git a/docs/reference/lang/lang/codelab/collaborative.md b/docs/reference/lang/lang/codelab/collaborative.md deleted file mode 100644 index febb4181..00000000 --- a/docs/reference/lang/lang/codelab/collaborative.md +++ /dev/null @@ -1,344 +0,0 @@ ---- -title: "Co-configuration with config operations" -linkTitle: "Co-configuration with config operations" -type: "docs" -weight: 2 -description: Co-configuration with config operations -sidebar_position: 3 ---- -## 1. Introduction - -Kusion Configuration Language (KCL) is a simple and easy-to-use configuration language, where users can simply write the reusable configuration code. - -In this codelab, we will learn how to write the config in a collaborative way using the KCL config operation features. - -### What We Will Learn - -1. Define schemas and organize project directories. -2. Create multiple environment configurations via the KCL config operation features. -3. Configure compiling parameters and tests. - -## 2. Define Schemas and Organize Project Directories - -### Schema Definitions - -Suppose we want to define a server configuration with certain attributes, we can create a simple config by creating a `server.k`, we can fill in the following code as below which defines a reusable schema of the configuration of a server. - -```python -import units - -type Unit = units.NumberMultiplier - -schema Server: - replicas: int = 1 - image: str - resource: Resource = {} - mainContainer: Main = {} - labels?: {str:str} - annotations?: {str:str} - -schema Main: - name: str = "main" - command?: [str] - args?: [str] - ports?: [Port] - -schema Resource: - cpu?: int = 1 - memory?: Unit = 1024Mi - disk?: Unit = 10Gi - -schema Port: - name?: str - protocol: "HTTP" | "TCP" - port: 80 | 443 - targetPort: int - - check: - targetPort > 1024, "targetPort must be larger than 1024" -``` - -In the code above, we define a schema named `Server`, which represents the configuration type that the user will write, which contains some basic type attributes (e.g., `replicas`, `image`, etc) and some composite type attributes (e.g., `resource`, `main`, etc). 
In addition to some basic types mentioned in the [schema codelab](./schema.md), we can see two types in the above code `Unit` and `units.NumberMultiplier`. Among them, `units.NumberMultiplier` denotes the KCL number unit type, which means that a natural unit or binary unit can be added after the KCL number, such as `1K` for `1000`, `1Ki` for `1024`. `Unit` is the type alias of `units.NumberMultiplier`, which is used to simplify the writing of type annotations. - -### Project Directories - -In order to complete the collaborative configuration development, we first need a configuration project, which contains the configuration of the test application and the differential configuration of different environments, so we are creating the following project directory: - -``` -. -├── appops -│ └── test_app -│ ├── base -│ │ └── base.k -│ ├── dev -│ │ ├── ci-test -│ │ │ └── stdout.golden.yaml -│ │ ├── kcl.yaml -│ │ └── main.k -│ └── prod -│ ├── ci-test -│ │ └── stdout.golden.yaml -│ ├── kcl.yaml -│ └── main.k -├── kcl.mod -└── pkg - └── sever.k -``` - -The directory of the project mainly contains three parts: - -- `kcl.mod`: The file used to identify the root directory of the KCL project. -- `pkg`: `Server` Schema structure reused by different application configurations. -- `appops`: Server configurations of different applications, currently only one application `test_app` is placed. - - `base`: Application common configurations for all environments. - - `dev`: Application configuration for the development environment. - - `prod`: Application configuration for the production environment. - -The meaning of `base.k`, `main.k`, `kcl.yaml` and `ci-test/stdout.golden.yaml` will be mentioned in subsequent sections. - -## 3. Create multiple environment configurations via the KCL config operation features - -### Create a baseline configuration - -After we have organized the project directory and the basic server configuration model, we can write the configuration of the user application. We can create our own test application folder `test_app` and place it in the application configuration folder `appops`. - -For the configuration of an application, we often divide it into a basic configuration and the differential configuration of multiple environments and merge them. Through the configuration merging feature of KCL, we can easily do this. Assuming that we have two configurations of development environment and production environment, we can create three folders: `base`, `dev` and `prod` to store baseline, development environment and production environment configurations respectively. First, we write the configuration of `base/base.k`: - -```python -import pkg - -server: pkg.Server { - # Set the image with the value "nginx:1.14.2" - image = "nginx:1.14.2" - # Add a label app into labels - labels.app = "test_app" - # Add a mainContainer config, and its ports are [{protocol = "HTTP", port = 80, targetPort = 1100}] - mainContainer.ports = [{ - protocol = "HTTP" - port = 80 - targetPort = 1100 - }] -} -``` - -As in the above code, we use the `import` keyword in `base.k` to import the `Server` schema placed under `pkg` and use it to instantiate a configuration named `server`, in which we set `image` attribute to `"nginx:1.14.2"`, and a label `app` with the value `test_app` is added. In addition, we also added the configuration of the main container `mainContainer` with the value `[{protocol = "HTTP", port = 80, targetPort = 1100}]` for the ports attribute. 
- -KCL command: - -``` -kcl appops/test_app/base/base.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2 - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app -``` - -At this point, we have a baseline configuration. - -### Create multiple environment configurations - -Next we configure a differentiated multi-environment configuration. First assume that we want to use a temporary image of our own `nginx:1.14.2-dev` in the development environment, and then use it to override the server configuration in the baseline, we can write the following configuration in `dev/main.k`: - -```python -import pkg - -server: pkg.Server { - # Override the image declared in the base - image = "nginx:1.14.2-dev" -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/dev/main.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2-dev - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app -``` - -It can be seen that the `image` field of the output YAML is overwritten to `nginx:1.14.2-dev`. Suppose we also want to add a label to the `dev` environment with a key of `env` and a value of `dev`, we add the following code to `dev/main.k`: - -```python -import pkg - -server: pkg.Server { - # Override the image declared in the base - image = "nginx:1.14.2-dev" - # Union a new label env into base labels - labels.env = "dev" -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/dev/main.k -``` - -```yaml -server: - replicas: 1 - image: nginx:1.14.2-dev - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app - env: dev -``` - -It can be seen that there are two labels in the `labels` field of the output YAML. - -In addition, we can also use the `+=` operator to add new values to list type attributes, such as the `mainContainer.ports` configuration in the baseline environment, continue to modify the code in `dev/main.k`: - -```python -import pkg - -server: pkg.Server { - # Override the base image. - image = "nginx:1.14.2-dev" - # Union a new label env into base labels. - labels.env = "dev" - # Append a port into base ports. - mainContainer.ports += [{ - protocol = "TCP" - port = 443 - targetPort = 1100 - }] -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/dev/main.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2-dev - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - - protocol: TCP - port: 443 - targetPort: 1100 - labels: - app: test_app - env: dev -``` - -Using the same method, we can build the production configuration, write the code in the `dev/main.k` file, and add a label to it. 
- -```python -import pkg - -server: pkg.Server { - # Union a new label env into base labels - labels.env = "prod" -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/prod/main.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2 - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app - env: prod -``` - -## 4. Configure compiling parameters and tests - -In the previous section, we built a multi-environment configuration through code. It can be seen that the KCL command line compilation parameters of different environments are similar, so we can configure these compilation parameters into a file and input them to the KCL command line for invocation. Configure the following code in `dev/kcl.yaml`: - -```yaml -kcl_cli_configs: - files: - - ../base/base.k - - main.k - output: ./ci-test/stdout.golden.yaml -``` - -Then we can compile the configuration in the development environment with the following command: - -``` -cd appops/test_app/dev && kcl -Y ./kcl.yaml -``` - -In addition, we have configured the `output` field in `dev/kcl.yaml` to output YAML to a file for subsequent configuration distribution or testing. You can verify that the application's configuration is as expected by walking through the `kcl.yaml` builds in each environment and comparing with `./ci-test/stdout.golden.yaml`. - -## 5. The Final Step - -Congratulations! - -We have completed the third lesson about KCL. diff --git a/docs/reference/lang/lang/codelab/index.md b/docs/reference/lang/lang/codelab/index.md deleted file mode 100644 index dbe554a9..00000000 --- a/docs/reference/lang/lang/codelab/index.md +++ /dev/null @@ -1 +0,0 @@ -# Code Lab diff --git a/docs/reference/lang/lang/codelab/schema.md b/docs/reference/lang/lang/codelab/schema.md deleted file mode 100644 index 423b0cb5..00000000 --- a/docs/reference/lang/lang/codelab/schema.md +++ /dev/null @@ -1,816 +0,0 @@ ---- -title: "Write complex config using KCL Schema" -linkTitle: "Write complex config using KCL Schema" -type: "docs" -weight: 2 -description: Write complex config using KCL Schema -sidebar_position: 2 ---- -## 1. Introduction - -Kusion Configuration Language (KCL) is a simple and easy-to-use configuration language, where users can simply write the reusable configuration code. - -In this codelab, we will learn how to write customized config using KCL, such that we can define a schema and write the config in a collaborative way. - -### What We Will Learn - -1. Define a simple schema -2. Set default immutable values to schema fields -3. Create config based on a simple schema -4. Write complex logic in schema -5. Create a new schema via schema combinations -6. Create a config of a deeply nested schema using dict/map -7. Create new schema via schema inheritance -8. Create new schema via multiple mixin schemas -9. Declare validation rules for the schema -10. Config schema output layout -11. Share and reuse schema - -## 2. Write Simple Schema - -Suppose we want to define a workload with certain attributes, we can create a simple config by creating a `my_config.k`, we can fill in the following code as below which defines a reusable schema of the configuration of deploy. 
- -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str - replica: int - command: [str] - labels: {str:str} -``` - -In the code above, `cpu` and `memory` are defined as int value; `name`, `image` and `service` are string; `command` is a list of string type; `labels` is a dict type, whose key type and value type are both string. - -Besides, each attribute **must** be assigned with a not-None value as a schema instance unless it is modified by a question mark **?** as an optional attribute. - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str - replica: int - command: [str] - labels?: {str:str} # labels is an optional attribute -``` - -When there is an inheritance relationship: - -- If the attribute is optional in the base schema, it could be optional or required in the sub-schema. -- If the attribute is required in the base schema, it must be required in the sub-schema. - -## 3. Enhance Schema as Needed - -Suppose we need to set default values to service and replica, we can make them as below: - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str = "my-service" # defaulting - replica: int = 1 # defaulting - command: [str] - labels?: {str:str} # labels is an optional attribute -``` - -And then we can set the service type annotation as the string literal type to make it immutable: - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: "my-service" = "my-service" - replica: int = 1 - command: [str] - labels?: {str:str} -``` - -In the schema, type hint is a `must`, for example we can define cpu as `cpu: int`. - -Specially, we can define a string-interface dict as `{str:}`, and in case we want to define an object or interface, just define as `{:}`. - -## 4. Create Config Based on Simple Schema - -Now we have a simple schema definition, we can use it to define config as: - -```python -nginx = Deployment { - name = "my-nginx" - cpu = 256 - memory = 512 - image = "nginx:1.14.2" - command = ["nginx"] - labels = { - run = "my-nginx" - env = "pre-prod" - } -} -``` - -Run with the following KCL command, we should be able to see the generated yaml files as the output as below: - -KCL command: - -``` - kcl my_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 256 - memory: 512 - image: nginx:1.14.2 - service: my-service - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -> Check the manual and specification out for more details about collection data types and block. - -In addition, the **config selector expressions** can be used to init a schema instance, and we can ignore the comma at the end of the line in the config expression. - -```python -nginx = Deployment { - name = "my-nginx" - cpu = 256 - memory = 512 - image = "nginx:1.14.2" - command = ["nginx"] # Ignore the comma at the end of the line - labels.run = "my-nginx" # A dict variable in schema can use selector expressions - labels.env = "pre-prod" # A dict variable in schema can use selector expressions -} -``` - -## 5. 
Write More Complex Logic in Schema - -Suppose we have some schema logic, we can wrapper it into schema: - -```python -schema Deployment[priority]: - name: str - cpu: int = _cpu - memory: int = _cpu * 2 - image: str - service: "my-service" = "my-service" - replica: int = 1 - command: [str] - labels?: {str:str} - - _cpu = 2048 - if priority == 1: - _cpu = 256 - elif priority == 2: - _cpu = 512 - elif priority == 3: - _cpu = 1024 - else: - _cpu = 2048 -``` - -Now, we can define a config by creating a schema instance and pass in priority as an argument to schema: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1.14.2" - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" -} -``` - -Run with kcl, we should see the generated yaml files as output as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 512 - memory: 1024 - image: nginx:1.14.2 - service: my-service - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -## 6. Create New Schema via Schema Combinations - -Now we want to define a detailed schema with service and volumes, we can do it as follows: - -```python -schema Deployment[priority]: - name: str - cpu: int = _cpu - memory: int = _cpu * 2 - volumes?: [Volume] - image: str - service?: Service - replica: int = 1 - command: [str] - labels?: {str:str} - - if priority == 1: - _cpu = 256 - elif priority == 2: - _cpu = 512 - elif priority == 3: - _cpu = 1024 - else: - _cpu = 2048 - -schema Port: - name: str - protocol: str - port: int - targetPort: int - -schema Service: - name: "my-service" = "my-service" - ports: [Port] - -schema Volume: - name: str - mountPath: str - hostPath: str -``` - -In this case, Deployment is composed of Service and a list of Volumes, and Service is composed of a list of Ports. - -## 7. Create Config of Deeply Nested Schema using Dict/Map - -Now we have a new Deployment schema, however, we may notice that it contains multiple layers of nested structures, in fact, this is very common in complex structure definitions, and we often have to write imperative assembly code to generate the final structure. - -With KCL, we can create the config with simple dict declaration, with the capability of full schema initialization and validation. For example, we can simply config nginx by the new Deployment schema as follows: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Run with KCL, we will see the generated yaml files as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -Note that, the dict that we use to define Deployment config must be aligned with the schema definition, otherwise we will get an error. 
For example, suppose we define a wrong type of service port as below: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = [80] # wrong data type, trying to assign List to int - targetPort = 9376 - }] -} -``` - -Run with KCL, we will see the error message as output as below: - -KCL command: - -```python -kcl my_config.k -``` - -Stderr: - -``` -The type got is inconsistent with the type expected: expect int, got [int(80)] -``` - -## 8. Declare Schema Validation Rules - -Now we have seen a complex schema, in which every field has a type hint to make it less error-prone. But this is not good enough, we want to support more enhanced verifications to our schemas, so that code errors in schemas and configs can be discovered as soon as possible. - -Lots of validation rules, like None type check, range check, value check, length check, regular expression matching, enum check have already been added or in progress. Here is a code sample: - -```python -import regex - -schema Deployment[priority]: - name: str - cpu: int = _cpu - memory: int = _cpu * 2 - volumes?: [Volume] - image: str - service?: Service - replica: int = 1 - command: [str] - labels?: {str:str} - - if priority == 1: - _cpu = 256 - elif priority == 2: - _cpu = 512 - elif priority == 3: - _cpu = 1024 - else: - _cpu = 2048 - - check: - multiplyof(cpu, 256), "cpu must be a multiplier of 256" - regex.match(image, "^[a-zA-Z]+:\d+\.\d+\.\d+$"), "image name should be like 'nginx:1.14.2'" - 1 <= replica < 100, "replica should be in range (1, 100)" - len(labels) >= 2 if labels, "the length of labels should be large or equal to 2" - "env" in labels, "'env' must be in labels" - len(command) > 0, "the command list should be non-empty" - -schema Port: - name: str - protocol: str - port: int - targetPort: int - - check: - port in [80, 443], "we can only expose 80 and 443 port" - protocol in ["HTTP", "TCP"], "protocol must be either HTTP or TCP" - 1024 < targetPort, "targetPort must be larger than 1024" - -schema Service: - name: "my-service" = "my-service" - ports: [Port] - - check: - len(ports) > 0, "ports list must be non-empty" - -schema Volume: - name: str - mountPath: str - hostPath: str -``` - -Since the attributes defined by the schema are **required** by default, the verification that judges that the variable cannot be None/Undefined can be omitted. - -```python -schema Volume: - name: str - mountPath: str - hostPath: str -``` - -Now we can write the config based on the new schema and expose config errors in time. For example, with the invalid config as below: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1142" # image value is not matching the regex - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Every field is type-valid, but the image name is invalid. 
- -Run with KCL, we will see the error message as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stderr: - -``` -Schema check is failed to check condition: regex.match(image, "^[a-zA-Z]+:\d+\.\d+\.\d+$"), "image name should be like 'nginx:1.14.2'" -``` - -> The verification capability of KCL covers the verification defined by Openapi so that we can write any API verifications through KCL. - -## 9. Create New Schema via Schema Inheritance - -Now we have a solid Deployment schema definition and we can use it to declare config. - -Usually, schema Deployment will be used in multiple scenarios. We can directly use the schema to declare the configurations in different use cases (see the above section), or we can produce a more specific schema definition through inheritance. - -For example, we can use the Deployment schema as a basis, to define the nginx's base schema, and extend the definition -in each scenario. - -In this case, we define some commonly used attributes. Please note that we mark the name to be immutable with the 'final' keyword to prevent it from being overwritten. - -```python -schema Nginx(Deployment): - """ A base nginx schema """ - name: "my-nginx" = "my-nginx" - image: str = "nginx:1.14.2" - replica: int = 3 - command: [str] = ["nginx"] - -schema NginxProd(Nginx): - """ A prod nginx schema with stable configurations """ - volumes: [Volume] = [{ - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - """ A volume mapped to host path """ - service: Service = { - ports = [{ - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] - } - """ An 80 port to target backend server """ -``` - -Now we have some static configurations for nginx. It is recommended to declare configurations that we think are static there, and put more dynamic configurations as below: - -```python -nginx = Nginx { - labels.run = "my-nginx" - labels.env = "pre-prod" -} -``` - -```python -nginx = NginxProd { - labels.run = "my-nginx" - labels.env = "pre-prod" -} -``` - -Now, we can simply define nginx prod config just with runtime label value "prod" which is not that static. - -In fact, under some complex situation, we can split all configurations into the basic, business, and environment configuration definitions in this way, and achieve collaboration among team members based on this. - -Run with KCL, we will see the generated yaml files as output as below: - -KCL command: - -``` -kcl prod_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 3 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -## 10. Create New Schema by Multiple Protocol and Mixin Schemas Inheritance - -Now, we can complete the declaration of the server configuration through the Deployment schema. - -However, usually, the actual situation is more complicated, and the deployment may have a variety of optional variable accessories. - -For example, we want to support a persistent volume claim based on an existing schema, as a reusable Kubernetes schema. 
In this case, we can just wrapper it with a `mixin` and a `protocol` as follows: - -```python -import kusion_kubernetes.api.core.v1 - -protocol PVCProtocol: - pvc?: {str:} - -mixin PersistentVolumeClaimMixin for PVCProtocol: - """ - PersistentVolumeClaim (PVC) sample: - Link: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims - """ - - # Mix in a new attribute `kubernetesPVC` - kubernetesPVC?: v1.PersistentVolumeClaim - - if pvc: - kubernetesPVC = v1.PersistentVolumeClaim { - metadata.name = pvc.name - metadata.labels = pvc.labels - spec = { - accessModes = pvc.accessModes - resources = pvc.resources - storageClassName = pvc.storageClassName - } - } -``` - -With this PersistentVolumeClaimMixin, we define a PVC schema with a clear `user interface`, and use Kubernetes PVC as an implementation. Then, we can define a server schema with Deployment schema, and PVC mixin schema. - -``` -schema Server(Deployment): - mixin [PersistentVolumeClaimMixin] - pvc?: {str:} - """ pvc user interface data defined by PersistentVolumeClaimMixin """ -``` - -In the Server schema, Deployment is the base schema, and PersistentVolumeClaimMixin is an optional add-on whose user interface data is `pvc?: {str:}`. - -Note, the `mixin` is often used to add new attributes to the host schema, or to modify the existing attributes of the host schema. Thus, `mixin` can use the attributes in the host schema. Since the `mixin` is designed to be reusable, we need an additional `protocol` to constrain the attribute names and types in the host schema for the `mixin`. - -Now, if we want a deploy with a PVC, just declare as user interface: - -```python -server = Server { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels = { - run = "my-nginx" - env = "pre-prod" - } - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] - pvc = { - name = "my_pvc" - accessModes = ["ReadWriteOnce"] - resources.requests.storage = "8Gi" - storageClassName = "slow" - } -} -``` - -Run with kcl, we will see the generated yaml files as output as below: - -KCL command: - -``` -kcl server.k -``` - -Stdout: - -```yaml -server: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod - pvc: - name: my_pvc - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 8Gi - storageClassName: slow ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: my_pvc -spec: - accessModes: - - ReadWriteOnce - storageClassName: slow - resources: - requests: - storage: 8Gi -``` - -If we don't want a persistent volume, just remove the pvc config block. - -## 11. Share and Reuse Schema - -The Server schema could be shared via `import`, we can simply package our code with KCL. 
- -```python -import pkg - -server = pkg.Server { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Another skill we should know about sharing code is, modules under the same package do not need to import each other. - -Suppose we have models in a pkg: - -``` -pkg/ - - deploy.k - - server.k - - pvc.k -``` - -And in `server.k`, we can just use Deployment schema in `deploy.k` and pvc schema in `pvc.k` without import: - -```python -# no import needed -schema Server(Deployment): - mixin [PersistentVolumeClaimMixin] - pvc?: {str:} - """ pvc user interface data defined by PersistentVolumeClaimMixin """ -``` - -And then users must import the pkg to use it as a whole: - -```python -import pkg - -server = pkg.Server { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [pkg.Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels = { - run = "my-nginx" - env = "pre-prod" - } - service.ports = [pkg.Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Run kcl command: - -``` -kcl pkg_server.k -``` - -Output: - -```yaml -server: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -## 12. The Final Step - -Congratulations! - -We have completed the second lesson about KCL, we have used KCL to replace our key-value text file to get better programming support. diff --git a/docs/reference/lang/lang/codelab/simple.md b/docs/reference/lang/lang/codelab/simple.md deleted file mode 100644 index 041cb370..00000000 --- a/docs/reference/lang/lang/codelab/simple.md +++ /dev/null @@ -1,495 +0,0 @@ ---- -title: "Write simple config with KCL" -linkTitle: "Write simple config with KCL" -type: "docs" -weight: 2 -description: Write simple config with KCL -sidebar_position: 1 ---- -## 1. Introduction - -Kusion Configuration Language (KCL) is a simple and easy-to-use configuration language, where users can simply -write the reusable configuration code. - -In this first codelab, we will learn how to write a simple config with KCL. - -Learning this codelab only requires basic programming knowledge, and experience with python will make it even easier. - -### What We Will Learn - -1. Write simple key-value configuration in a programmable way -2. Write simple logic in KCL code -3. Write collections in KCL code -4. Test and debug with KCL code -5. Use built-in support in KCL code -6. Share and reuse KCL code -7. Write config with dynamic input arguments - -## 2. Write Key-Value Pairs - -Generate a simple config by creating a `my_config.k`, we can fill in the following code without strict format which describes the configuration of deploy. - -```python -cpu = 256 -memory = 512 -image = "nginx:1.14.2" -service = "my-service" -``` - -In the code above, cpu and memory are declared as int value, while image and service are string literal. 
- -Run with KCL, we will see the generated data in yaml format as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 256 -memory: 512 -image: nginx:1.14.2 -service: my-service -``` - -The exported variable is immutable by default so that once it is declared, we can't modify it some where else. - -## 3. Write Simple Logic - -Sometimes we want to write a logic in configuration, then we can use: - -- Mutable and non-exported variable starting with '_' -- If-else statement - -A non-exported variable means it will not appear in the output YAML, and it can be assigned multiple times. - -Here is a sample to show how to adjust the resource with conditions. - -KCL command: - -```python -kcl my_config.k -``` - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -image = "nginx:1.14.2" -service = "my-service" -``` - -Run with KCL, we will see the generated data in yaml format as below: - -```python -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 256 -memory: 512 -image: nginx:1.14.2 -service: my-service -``` - -.. note:: -KCL has rich support of operators and string member functions, please read manual and specification for more details. - -## 4. Write Collections - -We can use collections to represent complex data types. The collections which are already supported are: - -- list -- dict - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -command = ["nginx"] # a list -labels = {run = "my-nginx"} # a dict -image = "nginx:1.14.2" -service = "my-service" -``` - -Run with kcl, we will see the generated data as yaml format as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx -image: nginx:1.14.2 -service: my-service -``` - -> Check manual and specification out for more about collection date type and member functions. - -## 5. Append Items Into Collections - -We can combine logical expressions, comprehensions, slices, unions and other characteristics to dynamically add elements to the collection - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable -_env = "pre-prod" - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -_command = ["nginx"] # a list -_command = _command + ["-f", "file"] # Append itemsinto command using + operator to contact two lists -command = [c.lower() for c in _command] # Take eachelement in the list to lowercase -_labels = { - run = "my-nginx" - if _env: - env = _env # Append a dict key-value pair when the _env is not None/Undefined or empty using if expressions -} # a dict -labels = _labels -image = "nginx:1.14.2" -service = "my-service" -``` - -Run with kcl, we will see the generated data as yaml format as below: - -```python -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 256 -memory: 512 -command: -- nginx -- -f -- file -labels: - run: my-nginx -image: nginx:1.14.2 -service: my-service -``` - -## 6. 
Write Assert - -To make code testable and robust, we can verify config data with assertions. - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -command = ["nginx"] # a list -labels = {run = "my-nginx"} # a dict -image = "nginx:1.14.2" -service = "my-service" -assert "env" in labels, "env label is a must" -assert cpu >= 256, "cpu cannot be less than 256" -``` - -Run with KCL, we will see eval failure with an error message as output as below: - -``` -kcl my_config.k -``` - -Stderr: - -``` -Assertion failure: env label is a must. -``` - -After adding env:pre-prod pair into labels, we will get the output as: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -``` - -## 7. Use Handy Built-in Support - -What's more, we can use built-in functions to help we debug or simplify coding. - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -_name = "nginx" -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = "pre-prod" -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" - -# debugging -print(labels) # debugging by print - -# test -assert len(labels) > 0, "labels can't be empty" # uselen() to get list length -assert "env" in labels, "env label is a must" -assert cpu >= 256, "cpu cannot be less than 256" -``` - -This sample shows how we use `format()`, `len()`, `print()` function to help customize the config. - -Run with KCL, we will see the generated data in yaml format as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -run: my-nginx -env: pre-prod -``` - -Note: more built-in functions and modules can be seen in spec/module - -## 8. Reuse Variables in Another Module - -To make our code well-organized, we can simply separate our code to `my_config.k` and `my_config_test.k`. - -Config data defined in `my_config.k`, - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 -_name = "nginx" - -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = "pre-prod" -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" -``` - -And test code defined in `my_config_test.k`, in which we can import my_config.k: - -```python -import my_config - -# debugging -print(my_config.labels) # debugging by print - -# test -assert len(my_config.labels) > 0, "labels can't beempty" # use len() to get list length -assert "env" in my_config.labels, "env label is a must" -assert my_config.cpu >= 256, "cpu cannot be less than256" -``` - -## 9. Config with Input Arguments - -Sometimes we need to get external input via parameters dynamically from the end user or platform. 
- -In this case, we can pass in `priority` and `env` on demand: - -- Pass in arguments: `-D priority=1 -D env=pre-prod` -- Get value by `option` keyword in KCL code - -```python -_priority = option("priority") # a non-exported and mutable variable -_env = option("env") # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -_name = "nginx" -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = _env -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" -``` - -Run with KCL, we will see the generated data in yaml format as below: - -``` -kcl -D priority=2 -D env=pre-prod my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -``` - -## 10. Simplify Logic Expression using Dict - -When we need to write complex logic, we can use dict to simplify the writing of logic. - -```python -_priority = option("priority") # a non-exported and mutable variable -_env = option("env") # a non-exported and mutable variable -_priorityCpuMap = { - "1" = 256 - "2" = 512 - "3" = 1024 -} -# Using a dict to simplify logic and the default value is 2048 -_cpu = _priorityCpuMap[_priority] or 2048 -_name = "nginx" -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = _env -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" -``` - -Run with KCL, we will see the generated data in yaml format as below: - -KCL command: - -``` -kcl -D priority=2 -D env=pre-prod my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -``` - -## 11. The Final Step - -Congratulations! - -We have completed the first lesson about KCL, we have used KCL to replace our key-value text file to get better programming support. - -Please check schema codelab out now to learn how to write an advanced config collaboratively with KCL `schema` mechanism. diff --git a/docs/reference/lang/lang/error/_category_.json b/docs/reference/lang/lang/error/_category_.json deleted file mode 100644 index 95062745..00000000 --- a/docs/reference/lang/lang/error/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Errors and Warnings", - "position": 4 -} diff --git a/docs/reference/lang/lang/error/_error.md b/docs/reference/lang/lang/error/_error.md deleted file mode 100644 index 596a0139..00000000 --- a/docs/reference/lang/lang/error/_error.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "错误检查" -linkTitle: "错误检查" -type: "docs" -weight: 1 -description: KCL 语言规范 ---- -When errors happen, developers should be able to detect the error and abort -execution. Thus, KCL introduce the `assert` syntax. - -In the previous topic of `schema` syntax. Errors can also be raised when a -schema is violated. - -## Syntax - -The syntax of the `assert` statement is the following. - -``` -assert_stmt: 'assert' test [',' test] -``` - -In the basic form, an `assert` statement evaluates an expression. If the -expression is evaluated to `False`, the assertion is failed, and an error -should be reported. - -In the extended form, an error message can be provided. 
The error message is -another expression. It is only evaluated when the expression to be evaluated -is evaluated to `False`. The evaluation result of the error message is printed -when reporting the error. - -The following is an example: - -```py -a = 1 -b = 3 -# a != b evaluates to True, therefore no error should happen. -assert a != b -# a == b is False, in the reported error message, the message "SOS" should be printed. -assert a == b, "SOS" -``` - -## The Implementation - -When an error happens, no matter it is caused by the `assert` or the `schema` syntax, -the virtual machine should exit with an exit code greater than `0`. - -The virtual machine may choose to dump the back trace information, and it is strongly -recommended to implement it. - -In practice, KCLVM can dump back trace by default, and an argument can be introduced -to disable it. diff --git a/docs/reference/lang/lang/error/exception.md b/docs/reference/lang/lang/error/exception.md deleted file mode 100644 index 1dbe8797..00000000 --- a/docs/reference/lang/lang/error/exception.md +++ /dev/null @@ -1,1395 +0,0 @@ ---- -title: "KCL Errors and Warnings" -linkTitle: "KCL Errors and Warnings" -type: "docs" -weight: 2 -description: KCL Errors and Warnings ---- - -The articles in this section of the documentation explain the diagnostic error and warning messages that are generated by the KCLVM. - -**Important:** -**The KCLVM can report many kinds of errors and warnings. After an error or warning is found, the build tools may make assumptions about code intent and attempt to continue, so that more issues can be reported at the same time. If the tools make the wrong assumption, later errors or warnings may not apply to your project. When you correct issues in your project, always start with the first error or warning that's reported and rebuild often. One fix may make many subsequent errors go away.** - -In the following sections you will find: - -[KCL Syntax Error (E1xxx)](#11-kcl-syntax-error-e1xxx) : The KCLVM may reports KCL syntax errors when illegal syntax is used in KCL program. - -[KCL Compile Error (E2xxx)](#12-kcl-compile-error-e2xxx): The KCLVM may reports KCL compile errors when the KCL program conforms to the KCL syntax but does not conform to the KCL semantics. - -[KCL Runtime Error (E3xxx)](#13-kcl-runtime-error-e3xxx): The KCLVM may report KCL runtime errors when the virtual machine executing a KCL program that passes the compilation. - -[KCL Compile Warning (W2xxx)](#14-kcl-compile-warning-w2xxx): When the compiler compiles KCL programs and finds possible potential errors, such warnings will be reported by KCLVM. - -## 1.1 KCL Syntax Error (E1xxx) - -This section mainly includes KCL errors: - -| ewcode | KCL exception | messages | -| ------ | ------------------------------------------------------------------- | ----------------------- | -| E1001 | [InvalidSyntaxError](#111-invalidsyntaxerror-e1001) | Invalid syntax | -| E1002 | [KCLTabError](#112-kcltaberror-e1002) | Tab Error | -| E1003 | [KCLIndentationError](#113-kclindentationerrore1003) | Indentation Error | -| E1I37 | [IllegalArgumentSyntaxError](#114-illegalargumentsyntaxerror-e1i37) | Illegal argument syntax | - -### 1.1.1 InvalidSyntaxError (E1001) - -KCLVM will report `InvalidSyntaxError` when KCL has a syntax error. - -The `ewcode` of `InvalidSyntaxError` is `E1001`. - -For example: - -``` -a, b = 1, 2 # Multiple assign is illegal in KCL syntax -``` - -The KCL program will cause the following error message. 
- -``` -KCL Syntax Error[E1001] : Invalid syntax ----> File /syntax_error/general/multiple_assign/case0/main.k:1:6 -1 |a = 1, 2 - 6 ^ -> Expected one of ['newline'] -Invalid syntax -``` - -Possible resolution: - -- Check and fix KCL syntax errors based on the KCL Language Standard - -### 1.1.2 KCLTabError (E1002) - -KCLVM will report `KCLTabError` when KCL has a tab and white space syntax error. - -In KCL, it is forbidden to mix tabs and four spaces in one indentation block. And we recommend only using white spaces or tabs for indentation in the entire KCL project, don’t mix them. - -The `ewcode` of `KCLTabError` is `E1002`. - -For example: - -``` -schema Person: - name: str # begin with a tab - age: int # begin with four white spaces, - # and four white spaces != tab in the env -``` - -The KCL program will cause the following error message. - -``` -KCL Syntax Error[E1002] : Tab Error ----> File /syntax_error/tab/tab_error_0/main.k:2:14 -2 | name: str - 14 ^ -> Failure -Inconsistent use of tabs and spaces in indentation -``` - -Possible resolution: - -- Only use a tab or four white spaces in KCL, do not mix them. - -### 1.1.3 KCLIndentationError(E1003) - -KCLVM will report `KCLIndentationError` when KCL has an indentation syntax error. - -The KCL syntax includes indentation. A tab or four white spaces in KCL represents an indentation. The other cases will be regarded as syntax errors by KCLVM. - -The `ewcode` of `KCLIndentationError` is `E1003`. - -For example: - -``` -schema Person: - name: str # a tab or four white spaces is legal. - age: int # three white spaces are illegal - info: str # two white spaces is illegal -``` - -The KCL program will cause the following error message. - -``` -KCL Syntax Error[E1003] : Indentation Error ----> File /syntax_error/indent/indent_error_0/main.k:2:14 -2 | name: str - 14 ^ -> Failure -Unindent 3 does not match any outer indentation level -``` - -Possible resolution: - -- Only use a tab or four white spaces in the KCL program for indentation. - -### 1.1.4 IllegalArgumentSyntaxError (E1I37) - -KCLVM will report `IllegalArgumentSyntaxError` when KCL has an illegal argument in KCL syntax. - -The `ewcode` of `IllegalArgumentSyntaxError` is `E1I37`. - -For example: - -``` -# Parameters without default values -# must be in front of parameters with default values. -a = option(type="list", default={"key": "value"}, "key1") -``` - -The KCL program will cause the following error message. 
- -``` -KCL Syntax Error[E1I37] : Illegal argument syntax ----> File /option/type_convert_fail_2/main.k:1:51 -1 |a = option(type="list", default={"key": "value"}, "key1") - 51 ^^^^^^ -> Failure -positional argument follows keyword argument -``` - -Possible resolution: - -``` -func(input_1, ..., input_n, - param_with_key_1 = input_with_key_1, ..., param_with_key_n = input_with_key_n) -``` - -## 1.2 KCL Compile Error (E2xxx) - -This section mainly includes KCL errors: - -| ewcode | KCL exception | messages | -| ------ | ---------------------------------------------------------------------- | --------------------------------------------------- | -| E2F04 | [CannotFindModule](#121-cannotfindmodulee2f04) | Cannot find the module | -| E2F05 | [FailedLoadModule](#122-failedloadmodulee2f05) | Failed to load module | -| E2H13 | [UnKnownDecoratorError](#123-unknowndecoratorerrore2h13) | UnKnown decorator | -| E2H14 | [InvalidDecoratorTargetError](#124-invaliddecoratortargeterrore2h14) | Invalid Decorator Target | -| E2C15 | [MixinNamingError](#125-mixinnamingerrore2c15) | Illegal mixin naming | -| E2C16 | [MixinStructureIllegal](#126-mixinstructureillegale2c16) | Illegal mixin structure | -| E2B17 | [CannotAddMembersComplieError](#127-cannotaddmemberscomplieerrore2b17) | Cannot add members to a schema | -| E2B20 | [IndexSignatureError](#128-indexsignatureerrore2b20) | Invalid index signature | -| E2G22 | [TypeComplieError](#129-typecomplieerrore2g22) | The type got is inconsistent with the type expected | -| E2L23 | [CompileError](#1210-compileerrore2l23) | A complie error occurs during compiling | -| E2L25 | [KCLNameError](#1211-kclnameerrore2l25) | Name Error | -| E2L26 | [KCLValueError](#1212-kclvalueerrore2l26) | Value Error | -| E2L27 | [KCLKeyError](#1213-kclkeyerrore2l27) | Key Error | -| E2L28 | [UniqueKeyError](#1214-uniquekeyerrore2l28) | Unique key error | -| E2A29 | [KCLAttributeComplieError](#1215-kclattributecomplieerrore2a29) | Attribute error occurs during compiling | -| E2D32 | [MultiInheritError](#1216-multiinheriterrore2d32) | Multiple inheritance is illegal | -| E2D34 | [IllegalInheritError](#1217-illegalinheriterrore2d34) | Illegal inheritance | -| E2I36 | [IllegalArgumentComplieError](#1218-illegalargumentcomplieerrore2i36) | Illegal argument during compiling | -| E3L41 | [ImmutableCompileError](#1219-immutablecompileerror-e3l41) | Immutable variable is modified | - -### 1.2.1 CannotFindModule(E2F04) - -KCLVM will report `CannotFindModule` when KCL imports a module that does not exist. - -The `ewcode` of `CannotFindModule` is `E2F04`. - -For example: - -``` -import .some0.pkg1 as some00 # some0 not found in package - -Name1 = some00.Name # some0.pkg1.name -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2F04] : Cannot find the module ----> File import_abs_fail_0/app-main/main.k:1:1 -1 |import .some0.pkg1 as some00 # some0 not found in app-main package - 1 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -> Failure -Cannot find the module .some0.pkg1 from import_abs_fail_0/app-main/some0/pkg1 -``` - -Possible resolution: - -- Add the import module file under the import path. - -### 1.2.2 FailedLoadModule(E2F05) - -KCLVM will report `FailedLoadModule` when an error occurs during loading a KCL external package. - -The `ewcode` of `FailedLoadModule` is `E2F05`. - -Possible resolution: - -- Check whether the module file is readable. -- Check whether the module file is a KCL file. 
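For instance, loading can fail when an imported package contains a module file that exists but cannot be read, or whose contents are not valid KCL. The layout below is a hypothetical illustration only (the file names `pkg/broken.k`, `main.k` and the attribute `value` are made up, and the exact diagnostic text depends on the KCLVM version):

```
# Assumed layout (illustrative):
#   pkg/
#     broken.k   <- present, but unreadable or not a valid KCL source file
# main.k
import pkg

a = pkg.value
```

In such a case, fix the file permissions or replace the offending file with a valid `.k` source file before importing the package.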
- -### 1.2.3 UnKnownDecoratorError(E2H13) - -KCLVM will report `UnKnownDecoratorError` when an unknown decorator is used in KCL. - -The `ewcode` of `UnKnownDecoratorError` is `E2H13`. - -For example: - -``` -@err_deprecated # It is an unknown decorator -schema Person: - firstName: str = "John" - lastName: str - name: str - -JohnDoe = Person { - name: "deprecated" -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2H13] : UnKnown decorator ----> File deprecated/unknown_fail_1/main.k:1:2 -1 |@err_deprecated - 2 ^ -> Failure -UnKnown decorator err_deprecated -``` - -Possible resolution: - -- Check whether the decorator exists. - -### 1.2.4 InvalidDecoratorTargetError(E2H14) - -KCLVM will report `InvalidDecoratorTargetError` when the target cannot be the target of the decorator. - -The `ewcode` of `InvalidDecoratorTargetError` is `E2H14`. - -Possible resolution: - -- Check whether the decorator in KCL is illegal. - -### 1.2.5 MixinNamingError(E2C15) - -KCLVM will report `MixinNamingError` when a mixin name does not end with 'Mixin'. - -The `ewcode` of `MixinNamingError` is `E2C15`. - -For example: - -``` -schema Person: - firstName: str - lastName: str - fullName: str - -schema Fullname: # It is a mixin, but 'Fullname' is not end with 'Mixin - fullName = "{} {}".format(firstName, lastName) - -schema Scholar(Person): - mixin [Fullname] - school: str - -JohnDoe = Scholar { - "firstName": "John", - "lastName": "Doe", - "fullName": "Doe Jon" -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2C15] : Illegal mixin naming ----> File mixin/invalid_name_failure/main.k:10:12 -10 | mixin [Fullname] - 12 ^ -> Failure -a valid mixin name should end with 'Mixin', got 'Fullname' -``` - -Possible resolution: - -- If the schema is a mixin, then the name of the schema should end with Mixin. - -### 1.2.6 MixinStructureIllegal(E2C16) - -KCLVM will report `MixinStructureIllegal` when the KCL structure is illegal. - -The `ewcode` of `MixinStructureIllegal` is `E2C16`. - -Possible resolution: - -- Check the structure of schema as Mixin. - -### 1.2.7 CannotAddMembersComplieError(E2B17) - -KCLVM will report `CannotAddMembersComplieError` when members that are not in the schema are used. - -The `ewcode` of `CannotAddMembersComplieError` is `E2B17`. - -For example: - -```python -schema Girl: - gender: str = "female" - -alice = Girl { - "first": "alice", # "first" can not be found in schema Girl - "last": " Green", # "last" can not be found in schema Girl - "age": 10 # "age" can not be found in schema Girl -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2B18] : Cannot add members to a schema ----> File /invalid/add_attribute/main.k:9:9 -9 |alice = Girl { - 9 ^ -> Failure -first,last,age: No such member in the schema -``` - -Possible resolution: - -- Add the members to the schema. -- Remove the using of the members not exists - -### 1.2.8 IndexSignatureError(E2B20) - -The `ewcode` of `IndexSignatureError` is `E2B20`. - -KCLVM will report `IndexSignatureError` when: - -1. Multiple index signatures in one schema. - -For example: - -``` -schema Data: - [str]: str - [str]: int # Multiple index signatures in one schema. - -data = Data { - name: "test" -} -``` - -The KCL program will cause the following error message. 
- -``` -KCL Complier Error[E2B20] : Invalid index signature ----> File index_signature/fail_1/main.k:3:5 -3 | [str]: int - 5 ^^^^^^^^^^ -> Failure -only one index signature is allowed in the schema -``` - -Possible resolution: - -- - Remove the extra index signature in the schema. - -2. The name of index signature attributes has the same name that conflicts with other attributes in the schema. - -For example: - -``` -schema Data: - name: str # name - [name: str]: str # the same name with the above attribute - -data = Data { - name: "test" -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2B20] : Invalid index signature ----> File index_signature/fail_2/main.k:3:5 -3 | [name: str]: str - 5 ^ -> Failure -index signature attribute name 'name' cannot have the same name as schema attributes -``` - -Possible resolution: - -- Remove attributes or index signatures that have conflicts with the same name in the schema, or change their names. - -3. Schema index signature value type has conflicts with the instance of schema. - -For example: - -``` -schema Data: - [str]: int - -data = Data { - name: "test" # Conflict with [str]:int, "test" is a string. -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File index_signature/fail_3/main.k:4:8 -4 |data = Data { - 8 ^ -> Failure -expected schema index signature value type int, got str(test) of the key 'name' -``` - -Possible resolution: - -- - Check that the type of schema index signature is consistent with the attribute type in the schema instance. - -4. Schema index signature has conflicts with schema. - -For example: - -``` -schema Data: - count: int # got int - [str]: str # except str - -data = Data { - count: 1 -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2B20] : Invalid index signature ----> File index_signature/fail_4/main.k:2:5 -2 | count: int - 5 ^ -> Failure -the type 'int' of schema attribute 'count' does not meet the index signature definition [str]: str -``` - -Possible resolution: - -- - Change schema for index signature or change index signature for schema. - -### 1.2.9 TypeComplieError(E2G22) - -KCLVM will report `TypeComplieError` when a type error occurs in compiling type check. - -The `ewcode` of `TypeComplieError` is `E2G22`. - -For example: - -``` -schema Person: - firstName: str - lastName: int - -JohnDoe = Person { - "firstName": "John", - "lastName": "Doe" # Type Error,lastName: int,“Doe” is a string. -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2G22] : The type got is inconsistent with the type expected ----> File type/type_fail_0/main.k:7:5 -7 | "lastName": "Doe" - 5 ^ -> Failure -expect int, got str(Doe) -``` - -Possible resolution: - -- Check that the type of value assigned to a variable is consistent with the type of the variable. - -### 1.2.10 CompileError(E2L23) - -The `ewcode` of `CompileError` is `E2L23`. - -KCLVM will report `CompileError` when: - -1. unsupport type union. - -For example: - -``` -_data = [1, 2, 3] -_data |= "value" -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File union/fail/fail_1/main.k:2 -2 |_data |= "value" -> Failure -unsupported operand type(s) for |=: '[int]' and 'str(value)' -``` - -Possible resolution: - -1. unsupported operand type. 
-
-For example:
-
-```
-a = None
-b = 1 + None # Unsupported operand type + for int and None
-```
-
-The KCL program will cause the following error message.
-
-```
-KCL Complier Error[E2L23] : A complie error occurs during compiling
----> File operator/operator_fail_0/main.k:2
-2 |b = 1 + None
-> Failure
-unsupported operand type(s) for +: 'int(1)' and 'NoneType'
-```
-
-Possible resolution:
-
-- Adjust the operator so that it supports both operand types.
-- Adjust the operands so that they conform to the constraints of the operator at the same time.
-
-1. variable is not defined.
-
-For example:
-
-```
-a = 1
-b = "${c + 1}" # 'c' is not defined
-```
-
-The KCL program will cause the following error message.
-
-```
-KCL Complier Error[E2L23] : A complie error occurs during compiling
----> File var_not_define_fail_0/main.k:2:8
-2 |b = "${c + 1}"
- 8 ^
-> Failure
-name 'c' is not defined
-```
-
-Possible resolution:
-
-- Define the undefined variable before using it.
-- Remove the undefined variable from the expression.
-
-1. invalid assign expression.
-
-For example:
-
-```
-# pkg.k
-a = 1
-
-# main.k
-import pkg
-pkg.a |= 2
-```
-
-The KCL program will cause the following error message.
-
-```
-KCL Complier Error[E2L23] : A complie error occurs during compiling
----> File pkg_inplace_modify_1/main.k:3:1
-3 |pkg.a |= 2
- 1 ^^^^^
-> Failure
-module 'pkg' can't be assigned
-```
-
-Possible resolution:
-
-- Check the assignment expression; members of an imported module cannot be reassigned.
-
-1. invalid string expression.
-
-For example:
-
-```
-a = 1
-b = "${b = a + 1}" # Invalid string interpolation expression
-```
-
-The KCL program will cause the following error message.
-
-```
-KCL Complier Error[E2L23] : A complie error occurs during compiling
----> File invalid_format_value_fail_0/main.k:2:5
-2 |b = "${b = a + 1}"
- 5 ^^^^^^^^^^^^^^
-> Failure
-invalid string interpolation expression 'b = a + 1'
-```
-
-Possible resolution:
-
-- Check the string interpolation expression; only valid expressions are allowed inside `${...}`.
-
-1. invalid loop variable.
-
-For example:
-
-```
-data = {"key1": "value1", "key2": "value2"}
-dataLoop = [i for i, j, k in data] # the number of loop variables can only be 1 or 2
-```
-
-The KCL program will cause the following error message.
-
-```
-KCL Complier Error[E2L23] : A complie error occurs during compiling
----> File dict/invalid_loop_var_fail_0/main.k:2:19
-2 |dataLoop = [i for i, j, k in data] # error
- 19 ^^^^^^^
-> Failure
-the number of loop variables is 3, which can only be 1 or 2
-```
-
-Possible resolution:
-
-- Use only one or two loop variables in the comprehension.
-
-### 1.2.11 KCLNameError(E2L25)
-
-KCLVM will report `KCLNameError` when a name error occurs in compiling.
-
-The `ewcode` of `KCLNameError` is `E2L25`.
-
-### 1.2.12 KCLValueError(E2L26)
-
-KCLVM will report `KCLValueError` when a value error occurs in compiling.
-
-The `ewcode` of `KCLValueError` is `E2L26`.
-
-### 1.2.13 KCLKeyError(E2L27)
-
-KCLVM will report `KCLKeyError` when a key error occurs in compiling.
-
-The `ewcode` of `KCLKeyError` is `E2L27`.
-
-### 1.2.14 UniqueKeyError(E2L28)
-
-KCLVM will report `UniqueKeyError` when duplicate names appear in the KCL code.
-
-The `ewcode` of `UniqueKeyError` is `E2L28`.
-
-For example:
-
-```
-schema Person:
-    name: str = "kcl"
-    age: int = 1
-
-schema Person:
-    aa: int
-
-x0 = Person{}
-x1 = Person{age:101}
-```
-
-The KCL program will cause the following error message.
- -``` -KCL Complier Error[E2L28] : Unique key error ----> File /schema/same_name/main.k:5:1 -5 |schema Person: - 1 ^ -> Failure -Variable name 'Person' must be unique in package context -``` - -Possible resolution: - -- Check if the name with error has been used. - -### 1.2.15 KCLAttributeComplieError(E2A29) - -KCLVM will report `KCLAttributeComplieError` when KCL has an illegal attribute in the schema. - -The `ewcode` of `KCLAttributeComplieError` is `E2A29`. - -For example: - -``` -# pkg -schema A: - field_A: str - -# main -import pkg as p - -a = p.D + 1 -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2A29] : Attribute error occurs during compiling ----> File /import/module/no_module_attr_fail_0/main.k:3:5 -3 |a = p.D + 1 - 5 ^ -> Failure -module 'pkg' has no attribute 'D' -``` - -Possible resolution: - -- Check for the existence of the schema attribute when using it. - -### 1.2.16 MultiInheritError(E2D32) - -KCLVM will report `MultiInheritError` when multiple inheritance appears in the schema. - -The `ewcode` of `MultiInheritError` is `E2D32`. - -For example: - -``` -schema Person: - firstName: str - lastName: str - -schema KnowledgeMixin: - firstName: int - subject: str - -schema Scholar(KnowledgeMixin, Person): - school: str -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2D32] : Multiple inheritance is illegal ----> File /schema/inherit/multi_inherit_fail_1/main.k:9:16 -9 |schema Scholar(KnowledgeMixin, Person): - 16 ^^^^^^^^^^^^^^^^^^^^^^ -> Failure -Multiple inheritance of Scholar is prohibited -``` - -Possible resolution: - -- Check the inheritance structure of the program, and multi-inheritance is not supported in KCL. - -### 1.2.17 IllegalInheritError(E2D34) - -KCLVM will report `IllegalInheritError` when an illegal inheritance occurs in the schema. - -The `ewcode` of `IllegalInheritError` is `E2D34`. - -For example: - -``` -schema FullnameMixin: - fullName = "{} {}".format(firstName, lastName) - -schema Scholar(FullnameMixin): # mixin inheritance is illegal - school: str -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2D34] : Illegal inheritance ----> File /schema/inherit/inherit_mixin_fail/main.k:8:1 -8 |schema Scholar(FullnameMixin): - 1 ^ -> Failure -mixin inheritance FullnameMixin is prohibited -``` - -Possible resolution: - -- Schema supports single inheritance of schema in KCL. - -### 1.2.18 IllegalArgumentComplieError(E2I36) - -KCLVM will report `IllegalArgumentComplieError` when the argument of option in KCL is illegal. - -The `ewcode` of `IllegalArgumentComplieError` is `E2I36`. -For example: - -``` -a = option("key") - -# kcl main.k -D key=value= -# key=value= is an illegal expression -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2I36] : Illegal argument during compiling -'key=value=' -``` - -Possible resolution: - -- Check whether the KCL option arguments are legal. - -### 1.2.19 ImmutableCompileError (E3L41) - -KCLVM will report `ImmutableCompileError` when the value of the immutable variable changes. - -The `ewcode` of `ImmutableCompileError` is `E3L41`. - -For example: - -``` -a = 2147483646 -a += 1 -``` - -The KCL program will cause the following error message. 
- -``` -KCL Compile Error[E2L41] : Immutable variable is modified ----> File /range_check_int/augment_assign/main.k:2:1 -2 |a += 1 - 1 ^ -> Failure -Immutable variable is modified -``` - -Possible resolution: - -- Set immutable variables changed to private or remove immutable variables. - -## 1.3 KCL Runtime Error (E3xxx) - -This section mainly includes KCL errors: - -| ewcode | KCL exception | messages | -| ------ | ---------------------------------------------------------------------- | --------------------------------------------------- | -| E3F06 | [RecursiveLoad](#131-recursiveload-e3f06) | Recursively loading module | -| E3K04 | [FloatOverflow](#132-floatoverflow-e3k04) | Float overflow | -| E3K09 | [IntOverflow](#133-intoverflow-e3k09) | Integer overflow | -| E3N11 | [DeprecatedError](#134-deprecatederror-e3n11) | Deprecated error | -| E3A30 | [KCLAttributeRuntimeError](#135-kclattributeruntimeerror-e3a30) | Attribute error occurs at runtime | -| E3G21 | [TypeRuntimeError](#136-typeruntimeerror-e3g21) | The type got is inconsistent with the type expected | -| E3B17 | [SchemaCheckFailure](#137-schemacheckfailure-e3b17) | Schema check is failed to check condition | -| E3B19 | [CannotAddMembersRuntimeError](#138-cannotaddmembersruntimeerrore3b19) | Cannot add members to a schema | -| E3M38 | [EvaluationError](#139-evaluationerrore3m38) | Evaluation failure | -| E3M39 | [InvalidFormatSpec](#1310-invalidformatspec-e3m39) | Invalid format specification | -| E3M40 | [KCLAssertionError](#1311-kclassertionerror-e3m40) | Assertion failure | -| E3M44 | [ImmutableRuntimeError](#1312-immutableruntimeerror-e3m44) | Immutable variable is modified | -| E2D33 | [CycleInheritError](#1313-cycleinheriterror-e2d33) | Cycle Inheritance is illegal | -| E3M42 | [KCLRecursionError](#1314-kclrecursionerror-e3m42) | Recursively reference | - -### 1.3.1 RecursiveLoad (E3F06) - -KCLVM will report `RecursiveLoad` when a cycle import of external packages occurs in KCL. - -The `ewcode` of `RecursiveLoad` is `E2F06`. - -For example: - -``` -# module.k -import main # module.k imports main.k - -print('module') - -# main.k -import module # main.k imports module.k - -print('main') -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3F06] : Recursively loading module ----> File /import/recursive_import_fail/main.k:4 -4 | -> Failure -In module module, recursively loading modules: module, main -``` - -Possible resolution: - -- - Check whether there is a circle import in KCL. - -### 1.3.2 FloatOverflow (E3K04) - -KCLVM will report `FloatOverflow` when a floating-point number overflows in KCL. - -The `ewcode` of `FloatOverflow` is `E3K04`. - -For example: - -``` -uplimit = 3.402823466e+38 -epsilon = 2.220446049250313e-16 -a = uplimit * (1 + epsilon) - -# kcl main.k -r -d -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3K07] : Float overflow ----> File /range_check_float/overflow/number_0/main.k:6 -6 |a = uplimit * (1 + epsilon) -> Failure -3.402823466000001e+38: A 32-bit floating point number overflow -``` - -Possible resolution: - -- Check whether the value of the float is the float range supported by KCL. - -### 1.3.3 IntOverflow (E3K09) - -KCLVM will report `IntOverflow` when an integer number overflows in KCL. - -The `ewcode` of `IntOverflow` is `E3K09`. - -For example: - -``` -_a = 9223372036854775807 -_a += 1 - -# kcl test.k -d -``` - -The KCL program will cause the following error message. 
- -``` -KCL Runtime Error[E3K09] : Integer overflow ----> File /range_check_int/augment_assign_fail_1/main.k:2 -2 |_a += 1 -> Failure -9223372036854775808: A 64 bit integer overflow -``` - -Possible resolution: - -- Check whether the value of the integer is the integer range supported by KCL. - -### 1.3.4 DeprecatedError (E3N11) - -KCLVM will report `DeprecatedError` when a deprecated variable is used and the strict is True. - -The `ewcode` of `DeprecatedError` is `E3N11`. - -For example: - -``` -schema Person: - firstName: str = "John" - lastName: str - @deprecated(version="1.16", reason="use firstName and lastName instead", strict=True) - name: str - -JohnDoe = Person { - name: "deprecated" # name is deprecated and strict is True -} -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3N11] : Deprecated error ----> File /schema/deprecated/member_standard_1/main.k:7 -7 |JohnDoe = Person { -> Failure -name was deprecated since version 1.16, use firstName and lastName instead -``` - -Possible resolution: - -- When strict is set to True, using deprecated code will cause an error and stop KCLVM. -- You can set the strict to False which will cause a warning insteads of an error. -- Adjust the code without using deprecated code. - -### 1.3.5 KCLAttributeRuntimeError (E3A30) - -KCLVM will report `KCLAttributeRuntimeError`, if an error occurs during dynamically accessing schema attributes through variables at runtime. - -The `ewcode` of `KCLAttributeRuntimeError` is `E3A30`. - -For example: - -``` -import math - -a = math.err_func(1) # err_func is not found in math -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3A30] : Attribute error occurs at runtime ----> File /import/module/no_module_attr_fail_2/main.k:3 -3 |a = math.err_func(1) -> Failure -module 'math' has no attribute 'err_func' -``` - -Possible resolution: - -- Check whether the attributes of schema are correct. - -### 1.3.6 TypeRuntimeError (E3G21) - -KCLVM will report `TypeRuntimeError` when an type error occurs in the runtime type check. - -The `ewcode` of `TypeRuntimeError` is `E3G21`. - -For example: - -``` -schema Person: - name: str = "Alice" - -_personA = Person {} -_personA |= {"name": 123.0} # name: str = "Alice" -personA = _personA -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3G21] : The type got is inconsistent with the type expected ----> File /fail/fail_4/main.k:5 -5 |_personA |= {"name": 123.0} -> Failure -expect str, got float -``` - -Possible resolution: - -- Stop the wrong type union or adjust to the type union supported by KCL. - -### 1.3.7 SchemaCheckFailure (E3B17) - -KCLVM will report `SchemaCheckFailure` when the schema check conditions are not met. - -The `ewcode` of `SchemaCheckFailure` is `E3B17`. - -For example: - -``` -schema Person: - lastName: str - age: int - check: - age < 140, "age is too large" - -JohnDoe = Person { - "lastName": "Doe", - "age": 1000 # the check condition: age < 140 -} -``` - -The KCL program will cause the following error message. 
- -``` -KCL Runtime Error[E3B17] : Schema check is failed to check condition ----> File /check_block/check_block_fail_1/main.k:9:11 -9 |JohnDoe = Person { - 11 ^ -> Check failed in the schema ----> File /check_block/check_block_fail_1/main.k:7 -7 | age < 140, "age is too large" -> Check failed on the condition -age is too large -``` - -Possible resolution: - -- - Check whether the attributes of schema can satisfy the conditions in check. - -### 1.3.8 CannotAddMembersRuntimeError(E3B19) - -KCLVM will report `CannotAddMembersRuntimeError` when members that are not in the schema are used. - -The `ewcode` of `CannotAddMembersRuntimeError` is `E3B19`. - -For example: - -``` -schema Name: - name: str - -schema Person: - name: Name - -person = Person { - name.err_name: "Alice" # err_name is not found in schema Name -} -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3B19] : Cannot add members to a schema ----> File /nest_var/nest_var_fail_1/main.k:8:5 -8 | name.err_name: "Alice" - 5 ^ -> Failure -err_name: No such member in the schema -``` - -Possible resolution: - -- Add a non-existent member to the schema. -- Access members that exist in the schema. - -### 1.3.9 EvaluationError(E3M38) - -KCLVM will report `EvaluationError` when an illegal evaluation occurs in KCL. - -The `ewcode` of `EvaluationError` is `E3M38`. - -For example: - -``` -_list1 = [1, 2, 3] # _list1 is a variable, and its type can only be known at runtime -_list2 = None # _list1 is a variable, and its type can only be known at runtime - -result2 = _list1 + _list2 # list + NoneType is illegal -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3M38] : Evaluation failure ----> File /datatype/list/add_None_fail/main.k:4 -4 |result2 = _list1 + _list2 -> Failure -can only concatenate list (not "NoneType") to list -``` - -Possible resolution: - -- Check whether the evaluation of the expression is legal. - -### 1.3.10 InvalidFormatSpec (E3M39) - -KCLVM will report `InvalidFormatSpec` when an illegal string format appears in KCL. - -The `ewcode` of `InvalidFormatSpec` is `E3M39`. - -For example: - -``` -a = 1 -b = 1 -data = "${a: #js}" + " $$ " # #js is illegal string -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3M39] : Invalid format specification ----> File /datatype/str_interpolation/invalid_format_spec_fail_0/main.k:3 -3 |data = "${a: #js}" + " $$ " -> Failure -#js is invalid format spec -``` - -Possible resolution: - -- Adjust illegal String to String supported by KCL standards. - -### 1.3.11 KCLAssertionError (E3M40) - -KCLVM will report `KCLAssertionError` when assert False occurs in KCL. - -The `ewcode` of `KCLAssertionError` is `E3M40`. - -For example: - -``` -assert False -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3M40] : Assertion failure ----> File /assert/invalid/fail_0/main.k:1 -1 |assert False -> Failure -Assertion failure -``` - -Possible resolution: - -- Check the condition of Assert, and when the Assert condition is False, such an error occurs, removing the Assert statement or changing the condition to True. - -### 1.3.12 ImmutableRuntimeError (E3M44) - -KCLVM will report `ImmutableRuntimeError` when the value of the immutable variable changes. - -The `ewcode` of `ImmutableRuntimeError` is `E3M44`. 
- -For example: - -``` -schema Person: - final firstName : str - lastName : str - -schema Scholar(Person): - firstName = "CBA" - -scholar = Scholar { - "firstName": "ABC" # firstName in schema Person is final. -} -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3M41] : Immutable variable is modified ----> File /final/fail_lazy_init_0/main.k:12:5 -12 | "firstName": "ABC" - 5 ^ -> Failure -final schema field 'firstName' -``` - -Possible resolution: - -- Check if the final variables have been assigned or other changes affect the values of the final variables. - -### 1.3.13 CycleInheritError (E2D33) - -KCLVM will report `CycleInheritError` when circle inheritance appeared in the schema. - -The `ewcode` of `CycleInheritError` is `E2D33`. - -For example: - -``` -schema Parent(Son): - parent_field: str - -schema Son(GrandSon): - son_field: str - -schema GrandSon(Parent): - grandson_field: str - -parent = Parent { - parent_field: "" -} -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Error[E2D33] : Cycle Inheritance is illegal ----> File /inherit/cycle_inherit_fail_1/main.k:7:1 -7 |schema GrandSon(Parent): - 1 ^ -> Failure -GrandSon and Parent -``` - -Possible resolution: - -- Check schema inheritance relationship to avoid A inheritance B and B inheritance A at the same time. - -### 1.3.14 KCLRecursionError (E3M42) - -KCLVM will report `KCLRecursionError` when a circle reference appears in the program. - -The `ewcode` of `KCLRecursionError` is `E3M42`. - -For example: - -``` -schema Parent(Son): - parent_field: str - son: Son = Son { # Parent has attribute Son - parent: Parent { - parent_field: "123" - } - } - -schema Son: - son_field: str - parent: Parent = Parent { # Son has attribute Parent - son: Son { - son_field: "123" - } - } - -parent = Parent { - parent_field: "", -} -``` - -The KCL program will cause the following error message. - -``` -KCL Runtime Error[E3M42] : Recursively reference ----> File /init/init_cycle_fail_0/main.k:10 -10 | son_field: str -> Failure -maximum recursion depth exceeded in __instancecheck__ -``` - -Possible resolution: - -- Check the members in the schema to avoid the problem of circle references. - -## 1.4 KCL Compile Warning (W2xxx) - -This section mainly includes KCL warnings: - -| ewcode | KCL exception | messages | -| ------ | ------------------------------------------------- | ------------------ | -| W2K04 | [FloatUnderflow](#141-floatunderflow-w2k04) | Float underflow | -| W2P10 | [InvalidDocstring](#142-invaliddocstring-w2p10) | Invalid docstring | -| W2N12 | [DeprecatedWarning](#143-deprecatedwarning-w2n12) | Deprecated warning | - -### 1.4.1 FloatUnderflow (W2K04) - -KCLVM will report `FloatUnderflow` when a floating-point number underflows in KCL. - -The `ewcode` of `FloatUnderflow` is `W2K08`. - -For example: - -``` -downlimit = 1.175494351e-38 -uplimit = 3.402823466e+38 - -epsilon = 2.220446049250313e-16 - -a = uplimit / (1 + epsilon) -b = downlimit / (1 + epsilon) - -# kcl main.k -r -d -``` - -The KCL program will cause the following error message. - -``` -KCL Complier Warning[W2K08] : Float underflow ----> File /range_check_float/underflow/number_0/main.k:7 -7 |b = downlimit / (1 + epsilon) -> Failure -1.1754943509999997e-38: A 32-bit floating point number underflow -``` - -Possible resolution: - -- Check whether the value of the float number is in the range supported by KCL. 
- -### 1.4.2 InvalidDocstring (W2P10) - -KCLVM will report `InvalidDocstring` when a string is illegal in KCL doc. - -The `ewcode` of `InvalidDocstring` is `W2P10`. - -Possible resolution: - -- Please write doc according to KCL standards. - -### 1.4.3 DeprecatedWarning (W2N12) - -KCLVM will report `DeprecatedWarning` when a deprecated variable is used and the strict is False. - -The `ewcode` of `DeprecatedWarning` is `W2N12`. - -For example: - -``` -schema Person: - firstName?: str = "John" - lastName?: str - @deprecated(version="1.16", reason="use firstName and lastName instead", strict=False) - name?: str - -JohnDoe = Person { - name: "deprecated" # name is deprecated and strict is False -} -``` - -The KCL program will cause the following error message. - -``` -KCL Compile Warning[W2N12] : Deprecated warning -name was deprecated since version 1.16, use firstName and lastName instead -``` - -Possible resolution: - -- Try not to use deprecated code. If the strict is True, KCLVM will output the error and stop running. diff --git a/docs/reference/lang/lang/error/index.md b/docs/reference/lang/lang/error/index.md deleted file mode 100644 index 15fbda6f..00000000 --- a/docs/reference/lang/lang/error/index.md +++ /dev/null @@ -1 +0,0 @@ -# Errors and Warnings diff --git a/docs/reference/lang/lang/index.md b/docs/reference/lang/lang/index.md deleted file mode 100644 index 66fd3dde..00000000 --- a/docs/reference/lang/lang/index.md +++ /dev/null @@ -1 +0,0 @@ -# KCL diff --git a/docs/reference/lang/lang/spec/_category_.json b/docs/reference/lang/lang/spec/_category_.json deleted file mode 100644 index 7b24faae..00000000 --- a/docs/reference/lang/lang/spec/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Spec", - "position": 3 -} diff --git a/docs/reference/lang/lang/spec/codestyle.md b/docs/reference/lang/lang/spec/codestyle.md deleted file mode 100644 index 0267a73c..00000000 --- a/docs/reference/lang/lang/spec/codestyle.md +++ /dev/null @@ -1,623 +0,0 @@ ---- -title: "Code Style" -linkTitle: "Code Style" -type: "docs" -weight: 2 -description: Code Style ---- -## Introduction - -This document gives the KCL code style conventions. Good code style can play a vital role in the development and maintenance of the project. We can learn the KCL code style by referring to the full text of the description and sample codes, and use KCL format and lint tools to help coding. - -## Source File Encoding - -KCL file encoding should always use **UTF-8**. - -## Code Layout - -### Indentation - -Use **4 spaces** per indentation level such as in the schema statement and if statement. - -```python -schema PersonA: - name: str # non-recommended - age: int - -schema PersonB: - name: str # recommended - age: int - -if True: - a = 1 # recommended -elif True: - b = 2 # non-recommended -else: - c = 3 # non-recommended -``` - -The closing brace/bracket/parenthesis on multiline constructs should line up under **first character** of the line that starts the multiline construct, as in: - -```python -# valid and recommended -my_list = [ - 1, 2, 3, - 4, 5, 6, -] -``` - -```python -# invalid -my_list = [ - 1, 2, 3, - 4, 5, 6, - ] -``` - -### Tabs or Spaces - -- Spaces are the preferred indentation method. -- Tabs should be used solely to remain consistent with code that is already indented with tabs. - -KCL disallows mixing the use of tabs and spaces for indentation and an error will be reported during the compile time. - -### Blank Lines - -- Surround top-level schema definitions with one blank line. 
-- Keep at most one blank line between two statements and remove redundant blank lines. -- Remove extra blank characters at the end of the line -- Remove extra blank characters in a blank line. -- There is no blank line in the header of the file, start writing from the first line. -- Only one blank line will be left at the end of the KCL file. - -```python -# Remove blank lines in the file header -a = 1 # Remove white space at the end of the line -# Keep at most one blank line between two statements - -b = 2 -# Only leave one blank line at the end of the file - -``` - -### Inline Expressions - -Write indentation of KCL `if`, `elif`, `else` and other conditions on different lines. - -```python -if True: print("") # non-recommended - -if True: # recommended - print("") -``` - -### Line Break and Continuation lines - -- For long expressions, use the line continuation symbol `\` and keep the left end of multiple expressions aligned. -- The 4-space rule is optional for continuation lines. - -```python -anotherString = "Too long expression " + \ - "Too long expression " # non-recommended - -longString = "Too long expression " + \ - "Too long expression " + \ - "Too long expression " # recommended -``` - -### When to Use Trailing Commas - -- Always use trailing commas. - -### Maximum Line Length - -- The general recommendation is **80 characters** but not absolute. - -### Symbol Break White Space - -Try to keep the spaces between different symbols, but not too many, usually one is good. - -```python -a = 1 # recommended -b = 1 + 2 # non-recommended -``` - -### Whitespace in Expressions and Statements - -Avoid extraneous whitespace in the following situations: - -- The parentheses `()`, brackets `[]` and braces `{}` in the expression have no spaces inside. - -```python -a = (1 + 2) # recommended -b = ( 1 + 2 ) # non-recommended - -c = [1, 2, 3] # recommended -d = [ 1, 2, 3 ] # non-recommended - -e = {key = "value"} # recommended -f = { key = "value" } # non-recommended -``` - -```python -spam(ham[1], {eggs = 2}) # recommended -spam( ham[ 1 ], { eggs = 2 } ) # non-recommended -``` - -- Between a trailing comma and a following close parenthesis. - -```python -foo = [0,] # recommended -bar = [0, ] # non-recommended -``` - -- Immediately before the open parenthesis that starts the argument list of a function call. - -```python -print(1) # recommended -print (1) # non-recommended -``` - -- Immediately before the open parenthesis that starts indexing or slicing. - -```python -dct = {key = "value"} -lst = [1, 2, 3] - -a = dct['key'] # recommended -b = dct ['key'] # non-recommended - -c = lst[0] # recommended -d = lst [1] # non-recommended -``` - -- More than one space around an assignment `=` (or other) operator to align it with another. - -```python -# recommended: -x = 1 -y = 2 -long_variable = 3 -``` - -```python -# non-recommended: -x = 1 -y = 2 -long_variable = 3 -``` - -- Always surround these binary operators with a single space on either side: assignment (`=`), augmented assignment (`+=`, `-=`, etc.), comparisons (`==`, `<`, `>`, `!=`, `<=`, `>=`, `in`, `not in`, `is`, `is not`), booleans (`and`, `or`, `not`). - -```python -# recommended: -i = i + 1 -submitted += 1 -x = x * 2 - 1 -hypot2 = x * x + y * y -c = (a + b) * (a - b) -``` - -```python -# non-recommended: -i = i+1 -submitted+=1 -x = x*2 - 1 -hypot2 = x*x + y*y -c = (a+b) * (a-b) -``` - -- Break one blank line between different statements e.g., import, schema and expression statements. 
- -```python -import math -import net - -schema Person: - name: str - -person = Person { - name = "Alice" -} -``` - -- Compound statements (multiple statements on the same line) are generally discouraged - -```python -# recommended: -if foo == 'blah': - do_blah_thing() -do_one() -do_two() -do_three() -``` - -```python -# non-recommended: -if foo == 'blah': do_blah_thing() -do_one(); do_two(); do_three() -``` - -## Naming Conventions - -### Naming Styles - -The following naming styles are commonly distinguished: - -- `b` (single lowercase letter) -- `B` (single uppercase letter) -- `lowercase` -- `lower_case_with_underscores` -- `UPPERCASE` -- `UPPER_CASE_WITH_UNDERSCORES` -- `CapitalizedWords` (capitalize all letters of the acronym in ``CapitalizedWords`` e.g., `HTTPServer`.) -- `mixedCase` (differs from `CapitalizedWords` by initial lowercase character) -- `Capitalized_Words_With_Underscores` (ugly and non-recommended) - -### Names to Avoid - -Never use the characters 'l' (lowercase letter el), 'O' (uppercase letter oh), or 'I' (uppercase letter eye) as single-character variable names. - -### Package and Module Names - -Package and module names should have short, all-lowercase names. - -### Schema Names - -Schema names should normally use the `CapWords` convention. - -### Constants - -Constants are usually defined on a module level and written in all capital letters with underscores separating words such as `MAX_OVERFLOW` and `TOTAL`. - -## Import - -- Imports should usually be on separate lines. -- Imports are always put at the top of the file, just after any module comments and docstrings, and before module globals and constants. -- Imports should be grouped in the following order and we should put a blank line between each group of imports. - 1. Standard library imports. - 2. Related third party plugin imports. - 3. Local application/library specific imports. -- Use an alias when we import a package name with a relatively long path. -- Leave only one space between the Import keyword and the package name. - -```python -import net # recommended -import math # non-recommended - -import ..pkg.internal_pkg as alias_pkg # recommended -``` - -## Comments - -- Comments should be complete sentences. The first word should be capitalized unless it is an identifier that begins with a lower-case letter (never alter the case of identifiers!). -- Block comments generally consist of one or more paragraphs built out of complete sentences, with each sentence ending in a period. -- Use two spaces after a sentence-ending period in multi-sentence comments, except after the final sentence. - -### Block Comments - -Block comments generally apply to some (or all) code that follows them, and are indented to the same level as that code. Each line of a block comment starts with a `#` and **a single space**(unless it is indented text inside the comment). - -Paragraphs inside a block comment are separated by a line containing a single `#`. - -```python -# This is a block comment -a = 1 -``` - -### Inline Comments - -Use inline comments sparingly. - -An inline comment is a comment on the same line as a statement. Inline comments should be separated by **at least two spaces** from the statement. They should start with a `#` and **a single space**. - -```python -a = 1 # This is an inline comment -``` - -### Documentation Strings - -Write docstrings for all public schema and schema attributes. 
- -```python -schema Person: - """ - Person schema doc string - """ - - name: str = "Alice" - """ - Person schema attribute name doc string - """ -``` - -## String - -- Single-quoted strings and double-quoted strings are the same in KCL. -- Use double-quoted string with lowercase prefix -- For triple-quoted strings, always use double quote characters to be consistent with the docstring convention. -- When a string contains single or double quote characters, use the other one to avoid backslashes in the string. - -```python -strA = b"123" # recommended -strB = B'123' # non-recommended - -strC = "'123'" # recommended -strD = "\"123\"" # non-recommended -``` - -## Number - -- Use lowercase for the prefix of non-decimal numbers, and use uppercase for the number itself. - -```python -foo = 0xAB # recommended -bar = 0Xab # non-recommended -``` - -## Operators - -### Binary Operators - -- Leave only one space before and after the assignment `=`. -- Leave only one space before and after the binary operator in the expression. - -```python -a = 1 # recommended -b=2 # non-recommended -c= 3 # non-recommended -d =4 # non-recommended - -_value = (1 + 2 * 3) # recommended -_value = (1+2*3) # non-recommended -``` - -### Unary Operators - -- There is only no space after unary operators e.g., `~`, `+` and `-`. - -```python -_value = 1 + -2 * ~3 # recommended -_value = 1+ - 2 * ~ 3 # non-recommended -``` - -- There is no space after `**` and `*` in the dict/list deduction expressions and argument expressions. - -```python -_list = [1, 2, 3] -_list = [*_list, [4, 5 ,6]] # recommended -_list = [* _list, [4, 5 ,6]] # non-recommended - -_dict = {**{k = "v"}, **{k = "v"}} # recommended -_dict = {** {k = "v"}, ** {k = "v"}} # non-recommended -``` - -- Use `is not` operator rather than `not ... is`. - -```python -# recommended: -if foo is not None: - a = 1 -``` - -```python -# non-recommended: -if not foo is None: - a = 1 -``` - -## Dict - -- There is no space before the colon `:` at the instantiation of KCL dict and schema config, and a space after the colon `:`. - -```python -d1 = {labels: {k1 = "v1"}} # recommended -d2 = {labels : {k1 = "v1"}} # non-recommended -d3 = {labels :{k1 = "v1"}} # non-recommended -``` - -- Always surround the override attribute operator `=` and the insert attribute operator `+=` with a single space on either sid. - -```python -d1 = {key = "value"} # recommended -d2 = {key= "value"} # non-recommended -d3 = {key ="value"} # non-recommended -``` - -```python -d1 = {key += [0, 1, 2]} # recommended -d2 = {key+= [0, 1, 2]} # non-recommended -d3 = {key +=[0, 1, 2]} # non-recommended -``` - -- Remove all commas at the end of the line in the KCL multiline dict because the end commas of each line are optional. - -```python -d = { - key1 = "value1" - key2 = "value2" - key3 = "value3" - key4 = "value4" -} -``` - -## List - -- Keep only **one space** after the comma `,` separating elements in the list - -```python -a = [1, 2, 3] # recommended -b = [1,2,3] # non-recommended -``` - -- Keep only **one space** before and after the comprehension expression token `for` and `in` in the dict and list. - -```python -a = [i for i in range(10)] # recommended -b = [i for i in range(10)] # non-recommended -``` - -## Slice - -- Keep the same number of spaces before and after the colon `:` of the list slice. 
- -```python -l = [1, 2, 3] -a = l[0:2] # recommended -b = l[0 : 2] # non-recommended -c = l[0: 2] # non-recommended - -d = l[0 + 0 : 1 + 1] # recommended -d = l[0 + 0:1 + 1] # non-recommended -``` - -## Schema - -- Leave only one space before and after the schema attribute assignment `=`. -- Always add a doc string to a schema, which is a good programming habit. - -```python -schema Person: - """ - Schema doc string - """ - name: str = "Alice" # recommended - age : int=12 # non-recommended - -person = Person {} -``` - -- Keep **no spaces** around the schema inheritance operator `()` - -```python -schema Base: - name: str - -schema Person(Base): # recommended - age: int - -schema Schema ( Base ): # non-recommended - age: int -``` - -- Keep **only one space** between the brackets and the schema name of the config at schema instantiation. - -```python -schema Base: - name: str - -schema Person(Base): - age: int - -personA = Person{} # non-recommended -personB = Person {} # recommended -``` - -- Keep **only one space** between the **mixin** keyword and the following `[]` operator - -```python -schema NameMixin: - name: str = "name" - -schema Person: - mixin [NameMixin] # non-recommended - age: int - -schema Parent: - mixin [NameMixin] # recommended - age: int -``` - -### Attribute Annotations - -- Annotations for schema attributes should have a single space after the colon `:` and no space before the colon `:`. - -```python -# recommended: -schema Person: - name: str # No space before the colon `:` - age: int = 18 # Spaces around assignment`=` -``` - -```python -# non-recommended: -schema Person: - codeA:int # No space after the colon `:` - codeB : int # Space before the colon `:` - name: str="Alice" # No spaces around assignment`=` -``` - -- There are no spaces around the colon `:` in the dict type annotation. - -```python -schema Person: - labels: {str:str} # recommended - keyValues: {str : str} # non-recommended -``` - -### Arguments - -- There are no spaces around the assignment `=` in the function/schema/decorator keyword arguments (kwargs). - -```python -schema Person[nameVar]: - # Decorator kwargs - @deprecated(strict=False) # recommended - name: str = nameVar - - @deprecated(strict = False) # non-recommended - age: int - -# Schema kwargs -personA = Person(nameVar="Alice") {} # recommended -personB = Person(nameVar = "Bob") {} # non-recommended - -# Function kwargs -print("", end='') # recommended -print("", end = '') # non-recommended -``` - -## Keywords - -- Only one space is usually reserved around the keyword, such as `schema`, `mixin`, `final`, `is` and `not`, etc. - -```python -schema NameMixin: - check: - name not None - -schema Person: - """ - Person schema definition - """ - mixin [NameMixin] - - final name: str = "Alice" - age: int - -person = Person { - age = 18 -} -``` - -## Function - -- There are no spaces around the function/package select operator `.` -- There are no spaces between the function name and the parentheses `()`. - -```python -import math - -print(math.log(10)) # recommended -print( math . log (10)) # non-recommended -``` - -## Other Recommendations - -- All commas `,` semicolons `;`, colons `:` has no spaces before them. 
- -```python -if True: - a = 1;b = 2 # non-recommended - c = 3; d = 4 # recommended -``` diff --git a/docs/reference/lang/lang/spec/datatypes.md b/docs/reference/lang/lang/spec/datatypes.md deleted file mode 100644 index 9c5f5ec2..00000000 --- a/docs/reference/lang/lang/spec/datatypes.md +++ /dev/null @@ -1,430 +0,0 @@ ---- -title: "Data Types" -linkTitle: "Data Types" -type: "docs" -weight: 2 -description: Data Types ---- -## Syntax - -### Bool - -Boolean values are the two constant objects `False` and `True`. They are used to represent truth values (although other values can also be considered false or true). The built-in function bool() can be used to convert any value to a Boolean, if the value can be interpreted as a truth value. - -### Int - -Int, or integer, is an arbitrarily sized integer, positive or negative, without decimals, of 64 binary digits precision(-9,223,372,036,854,775,808~9,223,372,036,854,775,807). Int is created by int literals or as the result of built-in functions and operators. Unadorned integer literals (including `hex`, `octal` and `binary` numbers) yield integers. The constructor int() can be used to produce int of a specific type. - -Besides, integer literals may have an `SI` or `IEC` multiplier. - -+ `SI`: General integer or fixed-point number form: `P`, `T`, `G`, `M`, `K`, `k`, `m`, `u`, `n`. -+ `IEC`: Corresponding power of 2: `Pi`, `Ti`, `Gi`, `Mi`, `Ki`. - -```python -a = 1 # positive integer: 1 -b = -1 # negative integer: -1 -c = 0x10 # hexadecimal literal: 16 -d = 0o10 # octal literal: 8, or the form `010` -e = 0b10 # binary literal: 2 -f = 10Ki # integer literal with IEC multiplier: 10240 -g = 1M # integer literal with SI multiplier: 1000000 -h = int("10") # int constructor: 10 -i = int("10Ki") # int constructor with multiplier: 10240 -``` - -Notes: - -+ Report an error if unable to represent an integer value precisely. - -### Float - -Float, floating-point, approximation to real numbers, positive or negative, containing one or more decimals, of 64 bit IEEE 754 floats. The constructor float() can be used to produce int of a specific type. - -```python -a = 1.10 -b = 1.0 -c = -35.59 -d = 32.3+e18 -f = -90. -g = -32.54e100 -h = 70.2-E12 -i = float("112") # float constructor -``` - -Notes: - -+ Report an error if unable to represent a floating-point value due to overflow -+ Report a warning if unable to represent a floating-point value due to underflow. Round to the nearest representable value if unable to represent a floating-point value due to limits on precision. These requirements apply to the result of any expression except for built-in functions for which an unusual loss of precision must be explicitly documented. - -#### None - -In KCL, `None` can indicate that the value of the object is empty, which is similar to `nil` in Go or `null` in Java, and corresponds to `null` in YAML and JSON. - -```python -a = None -b = [1, 2, None] -c = {"key1" = "value1", "key2" = None} -``` - -Please note that `None` cannot participate in the four arithmetic operations, but it can participate logical operators and comparison operators to perform calculations. - -```python -a = 1 + None # error -b = int(None) # error -c = not None # True -d = None == None # True -e = None or 1 # 1 -f = str(None) # None -``` - -#### Undefined - -`Undefined` is similar to None, but its semantics is that a variable is not assigned any value and will not be output to YAML or JSON. 
- -```python -a = Undefined -b = [1, 2, Undefined] -c = {"key1" = "value1", "key2" = Undefined} -``` - -Please note that `Undefined` cannot participate in the four arithmetic operations, but it can participate logical operators and comparison operators to perform calculations. - -```python -a = 1 + Undefined # error -b = int(Undefined) # error -c = not Undefined # True -d = Undefined == Undefined # True -e = Undefined or 1 # 1 -f = str(Undefined) # Undefined -``` - -### Common Numeric Operations - -Int and Float support the following operations (see built-in proposal for built-in details): - -+ `x + y`: sum of x and y. -+ `x - y`: difference of x and y. -+ `x * y`: product of x and y. -+ `x / y`: quotient of x and y. -+ `x // y`: floored quotient of x and y. -+ `x % y`: remainder of x / y. -+ `x ** y`: x to the power y. -+ `-x`: x negated. -+ `+x`: x unchanged. -+ `~x`: x bitwise negation. -+ `abs(x)`: absolute value or magnitude of x. -+ `int(x)`: x converted to integer. -+ `float(x)`: x converted to floating point. - -KCL supports mixed arithmetic: when a binary arithmetic operator has operands of different numeric types, the operand with the "narrower" type is widened to that of the other, where integer is narrower than floating-point. - -### String - -Strings are immutable sequences of Unicode characters. String literals are written in a variety of ways: - -```python -'allows embedded "double" quotes' # Single quotes -"allows embedded 'single' quotes" # Double quotes -'''Three single quotes''', """Three double quotes""" # Triple quoted -``` - -Triple quoted strings may span multiple lines. - -Indexing a string produces strings of length 1, for a non-empty string s, `s[0] == s[0:1]`. - -```python -a = "Hello, World!" -b = a[2:5] # "llo" -c = a[-5:-2] # "orl" -d = a[::-1] # "'!dlroW ,olleH'" -``` - -+ `str(x=None) -> str` - -Return a string. If *x* is not provided, raise a runtime error. - -```python -x = str(3.5) # "3.5" -``` - -#### Members - -Built-in function and members of a string - -+ `str#len() -> int` - Return the number of characters in the string. -+ `capitalize() -> str` - Return a copy of the string with its first character (if any) capitalized and the rest lowercased. -+ `count(sub: str, start: int = 0, end: int = -1) -> int` - Returns the number of (non-overlapping) occurrences of substring sub in string, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `endswith(suffix: str, start: int = 0, end: int = -1) -> bool` - Returns `True` if the string ends with the specified suffix, otherwise return `False`, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `find(sub: str, start: int = 0, end: int = -1) -> int` - Returns the lowest index where substring sub is found, or -1 if no such index exists, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `format(*args, **kwargs) -> str` - Perform string interpolation. Format strings contain replacement fields surrounded by curly braces {}. Anything that is not contained in braces is considered literal text, which is copied unchanged to the output. If you need to include a bracket character in the literal text, it can be escaped by doubling: A replacement field can be either a name, a number or empty. Values are converted to strings using the str function. 
-+ `index(sub: str, start: int = 0, end: int = -1) -> int` - Returns the first index where sub is found, or raises an error if no such index exists, optionally restricting to `[start:end]` start being inclusive and end being exclusive. -+ `isalnum() -> bool` - Returns True if all characters in the string are alphanumeric (`[a-zA-Z0-9]`) and there is at least one character, False otherwise. -+ `isalpha() -> bool` - Returns True if all characters in the string are alphabetic (`[a-zA-Z]`) and there is at least one character. -+ `isdigit() -> bool` - Returns True if all characters in the string are digits (`[0-9]`) and there is at least one character. -+ `islower() -> bool` - Returns True if all cased characters in the string are lowercase and there is at least one character. -+ `isspace() -> bool` - Returns True if all characters are white space characters and the string contains at least one character. -+ `istitle() -> bool` - Returns True if the string is in title case and it contains at least one character. This means that every uppercase character must follow an uncased one (e.g., whitespace) and every lowercase character must follow a cased one (e.g., uppercase or lowercase). -+ `isupper() -> bool` - Returns True if all cased characters in the string are uppercase and there is at least one character. -+ `join(iterable: list) -> str` - Return a string which is the concatenation of the strings in iterable. A TypeError will be raised if there are any non-string values in iterable. The separator between elements is the string providing this method. Example: - - ```python - >>> "|".join(["a", "b", "c"]) - "a|b|c" - ``` -+ `lower() -> str` - Returns a copy of the string with all the cased characters converted to lowercase. -+ `lstrip(chars: str) -> str` - Return a copy of the string with leading characters removed. The chars argument is a string specifying the set of characters to be removed. If omitted or None, the chars argument defaults to removing whitespace. The chars argument is not a prefix; rather, all combinations of its values are stripped: - - ```python - >>> ' spacious '.lstrip() - 'spacious ' - >>> 'www.example.com'.lstrip('cmowz.') - 'example.com' - ``` -+ `replace(old: str, new: str, count: int) -> str` - Return a copy of the string with all occurrences of substring old replaced by new. If the optional argument count is given, only the first count occurrences are replaced. -+ `rfind(sub: str, start: int = 0, end: int = -1) -> int` - Return the highest index in the string where substring sub is found, such that sub is contained within s[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. -+ `rindex(sub: str, start: int = 0, end: int = -1) -> int` - Returns the last index where sub is found, or raises an ValueError if no such index exists, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `rsplit(sep: str, maxsplit: int = -1) -> list` - Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done, the rightmost ones. If sep is not specified or None, any whitespace string is a separator. Except for splitting from the right, rsplit() behaves like split() which is described in detail below. -+ `rstrip(chars: str) -> str` - Return a copy of the string with trailing characters removed. The chars argument is a string specifying the set of characters to be removed. 
If omitted or None, the chars argument defaults to removing whitespace. The chars argument is not a suffix; rather, all combinations of its values are stripped: - - ```python - >>> ' spacious '.rstrip() - ' spacious' - >>> 'mississippi'.rstrip('ipz') - 'mississ' - ``` -+ `split(sep: str, maxsplit: int) -> list` - Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done (thus, the list will have at most maxsplit+1 elements). If maxsplit is not specified or -1, then there is no limit on the number of splits (all possible splits are made). - - If sep is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings (for example, `'1,,2'.split(',')` returns `['1', '', '2']`). The sep argument may consist of multiple characters (for example, `'1<>2<>3'.split('<>')` returns `['1', '2', '3']`). Splitting an empty string with a specified separator returns `['']`. - - For example: - - ```python - >>> '1,2,3'.split(',') - ['1', '2', '3'] - >>> '1,2,3'.split(',', maxsplit=1) - ['1', '2,3'] - >>> '1,2,,3,'.split(',') - ['1', '2', '', '3', ''] - ``` - - If sep is not specified or is None, a different splitting algorithm is applied: runs of consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Consequently, splitting an empty string or a string consisting of just whitespace with a `None` separator returns `[]`. - - For example: - - ```python - >>> '1 2 3'.split() - ['1', '2', '3'] - >>> '1 2 3'.split(maxsplit=1) - ['1', '2 3'] - >>> ' 1 2 3 '.split() - ['1', '2', '3'] - ``` -+ `splitlines(keepends: str) -> list` - Return a list of the lines in the string, breaking at line boundaries('\n', '\r\n', '\r'). Line breaks are not included in the resulting list unless keepends is given and true. - - This method splits on the following line boundaries. In particular, the boundaries are a superset of universal newlines. - - For example: - - ```python - >>> 'ab c\n\nde fg\rkl\r\n'.splitlines() - ['ab c', '', 'de fg', 'kl'] - >>> 'ab c\n\nde fg\rkl\r\n'.splitlines(keepends=True) - ['ab c\n', '\n', 'de fg\r', 'kl\r\n'] - ``` - - Unlike `split()` when a delimiter string sep is given, this method returns an empty list for the empty string, and a terminal line break does not result in an extra line: - - ```python - >>> "".splitlines() - [] - >>> "One line\n".splitlines() - ['One line'] - ``` - - For comparison, `split('\n')` gives: - - ```python - >>> ''.split('\n') - [''] - >>> 'Two lines\n'.split('\n') - ['Two lines', ''] - ``` -+ `startswith(prefix: str, start: int = 0, end: int = -1) -> bool` - Return `True` if string starts with the prefix, otherwise return False. prefix can also be a list of prefixes to look for. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. -+ `strip(chars: str) -> str` - Return a copy of the string with the leading and trailing characters removed. The chars argument is a string specifying the set of characters to be removed. If omitted or None, the chars argument defaults to removing whitespace. The chars argument is not a prefix or suffix; rather, all combinations of its values are stripped: - - ```python - >>> ' spacious '.strip() - 'spacious' - >>> 'www.example.com'.strip('cmowz.') - 'example' - ``` - - The outermost leading and trailing chars argument values are stripped from the string. 
Characters are removed from the leading end until reaching a string character that is not contained in the set of characters in chars. A similar action takes place on the trailing end. For example: - - ```python - >>> comment_string = '#....... Section 3.2.1 Issue #32 .......' - >>> comment_string.strip('.#! ') - 'Section 3.2.1 Issue #32' - ``` -+ `title() -> str` - Return a titlecased version of the string where words start with an uppercase character and the remaining characters are lowercase. - - For example: - - ```python - >>> 'Hello world'.title() - 'Hello World' - ``` - - The algorithm uses a simple language-independent definition of a word as groups of consecutive letters. The definition works in many contexts but it means that apostrophes in contractions and possessives form word boundaries, which may not be the desired result: - - ```python - >>> "they're bill's friends from the UK".title() - "They'Re Bill'S Friends From The Uk" - ``` -+ `upper() -> str` - Return a copy of the string with all the cased characters 4 converted to uppercase. Note that `s.upper().isupper()` might be `False` if s contains uncased characters or if the Unicode category of the resulting character(s) is not “Lu” (Letter, uppercase), but e.g., “Lt” (Letter, titlecase). - -### List - -Lists are immutable sequences, typically used to store collections of homogeneous items (where the precise degree of similarity will vary by application). - -Lists may be constructed in several ways: - -+ Using a pair of square brackets to denote the empty list: `[]` -+ Using square brackets, separating items with commas: `[a]`, `[a, b, c]` -+ Using a list comprehension: `[x for x in iterable]` -+ Using the type constructor: list() or list(iterable) - -The constructor builds a list whose items are the same and in the same order as iterable’s items.Iterable may be either a sequence, a container that supports iteration, or an iterator object. If iterable is already a list, a copy is made and returned, similar to `iterable[:]`. For example, `list('abc')` returns `['a', 'b', 'c']` and `list([1, 2, 3])` returns `[1, 2, 3]`. If no argument is given, the constructor creates a new empty list `[]`. - -Lists implement all of the common sequence operations. - -#### Members - -+ `len()` - Return the number of items in the list. - -### Common Sequence Operations - -The operations in the following table are supported by List and Dict. - -This table lists the sequence operations sorted in ascending priority. In the table, s and t are sequences of the same type, n, i, j and k are integers and x is an arbitrary object that meets any type and value restrictions imposed by s. - -The `in` and `not in` operations have the same priorities as the comparison operations. The + -(concatenation) and * (repetition) operations have the same priority as the corresponding numeric operations. - -| Operation | Result | Notes | -| ------------ | -------------------------------------------------- | ----- | -| `x in s` | `True` if an item of s is equal to x, else `False` | #1 | -| `x not in s` | `False` if an item of s is equal to x, else `True` | #1 | -| `s + t` | the concatenation of s and t | #5 | -| `s[i]` | ith item of s, origin 0 | #2 | -| `s[i:j]` | slice of s from i to j | #2 #3 | -| `s[i:j:k]` | slice of s from i to j with step k | #2 #4 | -| `min(s)` | smallest item of s | | -| `max(s)` | largest item of s | | - -Notes: - -+ 1. 
While the in and not in operations are used only for simple containment testing in the - general case, some specialized sequences (str) also use them for subsequence testing: - -```python ->>> "gg" in "eggs" -True -``` - -+ 2. If i or j is negative, the index is relative to the end of sequence s: `s.len() + i` or `s.len() + j` is substituted. But note that -0 is still 0. -+ 3. The slice of s from i to j is defined as the sequence of items with index k such that `i <= k < j`. If i or j is greater than `s.len()`, use `s.len()`. If i is omitted or None, use 0. If j is omitted or None, use `s.len()`. If i is greater than or equal to j, the slice is empty. -+ 4. The slice of s from i to j with step k is defined as the sequence of items with index `x = i + n*k` such that `0 <= n < (j-i)/k`. In other words, the indices are `i`, `i+k`, `i+2*k`, `i+3*k` and so on, stopping when j is reached (but never including j). When k is positive, i and j are reduced to s.len() if they are greater. When k is negative, i and j are reduced to s.len() - - + If they are greater. If i or j are omitted or None, they become “end” values (which end depends on the sign of k). Note, k cannot be zero. If k is None, it is treated like 1. -+ 5. Concatenating immutable sequences always results in a new object. This means that building up a sequence by repeated concatenation will have a quadratic runtime cost in the total sequence length. To get a linear runtime cost, you must switch to one of the alternatives below: - - + if concatenating str objects, you can build a list and use `str.join()` at the end -+ 6. `index` raises `ValueError` when x is not found in s. Not all implementations support passing the additional arguments i and j. These arguments allow efficient searching of subsections of the sequence. Passing the extra arguments is roughly equivalent to using `s[i:j].index(x)`, only without copying any data and with the returned index being relative to the start of the sequence rather than the start of the slice. - -### Dict - -Dict is an immutable mapping object maps hashable values to arbitrary objects. A dictionary’s keys are almost arbitrary values. Values that are not hashable, that is, values containing lists, dictionaries may not be used as keys. Numeric types used for keys obey the normal rules for numeric comparison: if two numbers compare equal (such as 1 and 1.0) then they can be used interchangeably to index the same dictionary entry. (Note however, that since computers store floating-point numbers as approximations it is usually unwise to use them as dictionary keys.) Dict is ordered. The order of the keys is the order of their declaration. - -Dictionaries can be created by placing a comma-separated list of keys: value pairs within braces, for example: `{'jack': 4098, 'sjoerd': 4127}` or `{4098: 'jack', 4127: 'sjoerd'}`, by the dict constructor, or list/dict comprehension. - -+ `dict(obj)` - -Return a new dictionary initialized from an optional positional argument and a possibly empty set of keyword arguments.If no positional argument is given, an empty dictionary is created. If a positional argument is given and it is a mapping object, a dictionary is created with the same key-value pairs as the mapping object. Otherwise, the positional argument must be an iterable object. Each item in the iterable must itself be an iterable with exactly two objects. The first object of each item becomes a key in the new dictionary, and the second object the corresponding value. 
If a key occurs more than once, the last value for that key becomes the corresponding value in the new dictionary. If keyword arguments are given, the keyword arguments and their values are added to the dictionary created from the positional argument. If a key being added is already present, the value from the keyword argument replaces the value from the positional argument.To illustrate, the following examples all return a dictionary equal to `{"one": 1, "two": 2, "three": 3}`: - -```python ->>> a = {'two': 1, 'one': 2, 'three': 3} ->>> b = {'one': 1, 'two': 2, 'three': 3} ->>> c = {'three': 3, 'one': 1, 'two': 2} ->>> a == b == c -True -``` - -Providing keyword arguments as in the first example only works for keys that are valid KCL identifiers. Otherwise, any valid keys can be used. - -In the dict comprehension, key/value pairs yielded by the generator expression is set in the dictionary in the order yielded: the first occurrence of the key determines its insertion order, and the last determines the value associated to it. - -```python ->>> {str(i): 2 * i for i in range(3)} -{"0": 0, "1": 2, "2": 4} - ->>> a = {"one": 1, "two": 2, "three": 3} ->>> b = {k: v for k, v in a if v >= 2} -{two: 2, three: 3} -``` - -#### Operations & Members - -These are the operations that dictionaries the support. - -+ `list(d)` - Return a list of all the keys used in the dictionary d. -+ `len()` - Return the number of items in the dictionary d. -+ `d[key]` - Return the item of d with key. Return Undefined if key is not in the map. -+ `key in d` - Return True if d has a key, else False. -+ `key not in d` - Equivalent to not key in d. -+ `d.key` - Return the item of d with key. Return Undefined if key is not in the map. - -Dictionaries compare equal if and only if they have the same (key, value) pairs(keys' ordering matters). Order comparisons (‘<’, ‘<=’, ‘>=’, ‘>’) raise TypeError. - -```python ->>> d = {"one": 1, "two": 2, "three": 3, "four": 4} ->>> d -{'one': 1, 'two': 2, 'three': 3, 'four': 4} ->>> list(d) -['one', 'two', 'three', 'four'] -``` diff --git a/docs/reference/lang/lang/spec/error.md b/docs/reference/lang/lang/spec/error.md deleted file mode 100644 index 84f9044a..00000000 --- a/docs/reference/lang/lang/spec/error.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "Errors" -linkTitle: "Errors" -type: "docs" -weight: 2 -description: Errors ---- -When errors happen, developers should be able to detect the error and abort -execution. Thus, KCL introduce the `assert` syntax. - -In the previous topic of `schema` syntax. Errors can also be raised when a -schema is violated. - -## Syntax - -The syntax of the `assert` statement is the following. - -``` -assert_stmt: 'assert' test [',' test] -``` - -In the basic form, an `assert` statement evaluates an expression. If the -expression is evaluated to `False`, the assertion is failed, and an error -should be reported. - -In the extended form, an error message can be provided. The error message is -another expression. It is only evaluated when the expression to be evaluated -is evaluated to `False`. The evaluation result of the error message is printed -when reporting the error. - -The following is an example: - -```python -a = 1 -b = 3 -# a != b evaluates to True, therefore no error should happen. -assert a != b -# a == b is False, in the reported error message, the message "SOS" should be printed. 
-assert a == b, "SOS" -``` - -## The Implementation - -When an error happens, no matter it is caused by the `assert` or the `schema` syntax, -the virtual machine should exit with an exit code greater than `0`. - -The virtual machine may choose to dump the back trace information, and it is strongly -recommended to implement it. - -In practice, KCLVM can dump back trace by default, and an argument can be introduced -to disable it. diff --git a/docs/reference/lang/lang/spec/expressions.md b/docs/reference/lang/lang/spec/expressions.md deleted file mode 100644 index 58999cb4..00000000 --- a/docs/reference/lang/lang/spec/expressions.md +++ /dev/null @@ -1,842 +0,0 @@ ---- -title: "Expressions" -linkTitle: "Expressions" -type: "docs" -weight: 2 -description: Expressions ---- -## Syntax - -In KCL, an expression specifies the computation of a value. - -The syntax is the following: - -``` -expression: test ("," test)* -test: if_expr | primary_expr | unary_expr | binary_expr -``` - -KCL expressions consist of `if` expression, `primary` expression, `unary` expression, and `binary` expression. - -### Primary Expressions - -Primary expressions are the operands for unary and binary expressions. - -Operands are self-delimiting. An **operand** may be followed by any number of selector dot, a function call, or slice suffixes, to form a primary expression. The grammar uses `expression`, where a multiple-component expression is allowed, and `test` where it accepts an expression of only a single component. - -Syntax: - -``` -primary_expr: operand | primary_expr select_suffix | primary_expr call_suffix | primary_expr subscript_suffix -``` - -### Operands - -Operand denotes the elementary value in an expression. An operand may be an identifier, a literal, or a parenthesized expression. - -Syntax: - -``` -operand: operand_name | number | string | "True" | "False" | "None" | list_expr | list_comp | dict_expr | dict_comp | "(" expression ")" -operand_name: identifier | qualified_identifier -``` - -### Identifiers - -In KCL, an identifier is a name, may with selectors, that identifies a value. - -Syntax: - -``` -identifier: NAME -``` - -Examples: - -```python -x -a -_b -``` - -Use the `$` character prefix to define keyword identifiers. - -```python -$if = 1 -$else = "s" -``` - -Please note: whether the non-keyword identifier is prefixed with `$` has the same effect. - -```python -_a = 1 -$_a = 2 # equal to `_a = 2` -``` - -To simplify the definition of the qualified identifier, such as 'pkg.type', we additionally define `qualified_identifier`: - -Syntax: - -``` -qualified_identifier: identifier "." identifier -``` - -Examples: - -```python -pkg.a -``` - -The package name in qualified_identifier must be imported. - -### Basic Literals - -Basic literals supported in KCL are `int`, `float`, `string` and `bool` including `True` and `False`. Evaluation of basic literal yields a value of the given type with the given value. - -Syntax: - -``` -operand: number | string | "True" | "False" | "None" | "Undefined" -``` - -Examples: - -```python -1 -2.3 -"abc" -True -False -None -Undefined -``` - -See more details about **data type** spec. - -### Parenthesized Expressions - -An expression enclosed in parentheses yields the result of that expression. 
- -Syntax: - -``` -operand: '(' [expression] ')' -``` - -Examples: - -```python -x = (1 + 2) * (3 + 4) # 21 -``` - -### Dictionary Expressions - -A dictionary expression is a comma-separated immutable list of colon-separated key/value expression pairs, enclosed in curly brackets, and it yields a new dictionary object. An optional comma may follow the final pair. - -``` -dict_expr: '{' [entries [',']] '}' -entries: entry {',' entry} -entry: test ':' test | "**" primary_expr -``` - -Examples: - -```python -{} -{"one": 1} -{"one": 1, "two": 2} -``` - -The key and value expressions are evaluated in left-to-right order. Evaluation fails if the same key is used multiple times. - -Only hashable values may be used as the keys of a dictionary. This includes all built-in types except dictionaries, and lists. - -We can ignore the comma `,` at the end of the line for writing dict key-value pairs in multiple lines: - -```python -data = { - "key1" = "value1" # Ignore the comma ',' at the end of line - "key2" = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -We can ignore the key quotation marks when we writing simple literals on the key. - -```python -data = { - key1 = "value1" # Ignore the comma ',' at the end of line - key2 = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -In addition, the **config selector expressions** can be used to init a schema instance. - -```python -person = { - base.count = 2 - base.value = "value" - labels.key = "value" -} # {"base": {"count": 2, "value": "value"}, "labels": {"key": "value"}} -``` - -We can **merge** dict using the dict unpacking operator `**` like this: - -```python -_part1 = { - a = "b" -} - -_part2 = { - c = "d" -} - -a_dict = {**_part1, **_part2} # {"a: "b", "c": "d"} -``` - -We can use `if expressions` to dynamically add elements to the dict element, elements that meet the conditions are added to the dict, and elements that do not meet the conditions are ignored. - -```python -a = 1 # 1 -data = { - key1 = "value1" - if a == 1: key2 = "value2" - if a > 0: key3 = "value3" - if a < 0: key4 = "value4" -} # {"key1": "value1", "key2": "value2", "key3": "value3"} -``` - -```python -a = 1 # 1 -data1 = { - key1 = "value1" - if a == 1: - key2 = "value2" - elif a > 0: - key3 = "value3" - else: - key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -data2 = { - key1 = "value1" - if a == 1: key2 = "value2" - elif a > 0: key3 = "value3" - else: key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -``` - -### List Expressions - -A list expression is a comma-separated immutable list of element expressions, enclosed in square brackets, and it yields a new list. An optional comma may follow the last element expression. - -``` -list_expr: '[' [list_item [',']] ']' -list_item: test | "*" primary_expr -``` - -Element expressions are evaluated in left-to-right order. - -Examples: - -```python -[] # [], empty list -[1] # [1], a 1-element list -[1, 2, 3] # [1, 2, 3], a 3-element list -``` - -We can use `if expressions` to dynamically add elements to the list element, elements that meet the conditions are added to the list, and elements that do not meet the conditions are ignored. 
- -```python -a = 1 # 1 -data = [ - 1 - if a == 1: 2 - if a > 0: 3 - if a < 0: 4 -] # [1, 2, 3] -``` - -```python -a = 1 # 1 -data1 = [ - 1 - if a == 1: - 2 - elif a == 2: - 3 - else: - 3 -] # [1, 2] -data2 = [ - 1 - if a == 1: 2 - elif a == 2: 2 - else: 3 -] # [1, 2] -``` - -### Comprehensions - -A comprehension constructs a new list or dictionary value by looping over one or more iterables and evaluating a body expression that produces successive elements of the result. - -Syntax: - -``` -list_comp: '[' list_item comp_clause+ ']' . -dict_comp: '{' entry comp_clause+ '}' . - -comp_clause: 'for' loop_variables [","] 'in' test ['if' test] -loop_variables: primary_expr (',' primary_expr)* -``` - -A list comprehension consists of a single expression followed by one or more clauses, the first of which must be a `for` clause. Each `for` clause resembles a `for` statement, and specifies an iterable operand and a set of variables to be assigned by successive values of the iterable. An `if` cause resembles an `if` statement, and specifies a condition that must be met for the body expression to be evaluated. A sequence of `for` and `if` clauses acts like a nested sequence of `for` and `if` statements. - -Examples: - -```python -[x * x for x in range(5)] # [0, 1, 4, 9, 16] -[x * x for x in range(5) if x % 2 == 0] # [0, 4, 16] -[[x, y] for x in range(5) \ - if x % 2 == 0 \ - for y in range(5) \ - if y > x] # [[0, 1], [0, 2], [0, 3], [0, 4], [2, 3], [2, 4]] -``` - -Besides, we can use two variables in the list comprehension, the first variable denotes the list index and the second variable denotes the list item. - -```python -data = [1000, 2000, 3000] -# Single variable loop -dataLoop1 = [i * 2 for i in data] # [2000, 4000, 6000] -dataLoop2 = [i for i in data if i == 2000] # [2000] -dataLoop3 = [i if i > 2 else i + 1 for i in data] # [1000, 2000, 3000] -# Double variable loop -dataLoop4 = [i + v for i, v in data] # [1000, 2001, 3002] -dataLoop5 = [v for i, v in data if v == 2000] # [2000] -# Use `_` to ignore loop variables -dataLoop6 = [v if v > 2000 else v + i for i, v in data] # [1000, 2001, 3000] -dataLoop7 = [i for i, _ in data] # [0, 1, 2] -dataLoop8 = [v for _, v in data if v == 2000] # [2000] -``` - -A dict comprehension resembles a list comprehension, but its body is a pair of expressions, key: value, separated by a colon, and its result is a dictionary containing the key/value pairs for which the body expression was evaluated. Evaluation fails if the value of any key is un-hashable. - -Besides, we can use two variables in the dict comprehension, the first variable denotes the dict key and the second variable denotes the dict value of the key. 
- -```python -data = {"key1" = "value1", "key2" = "value2"} -# Single variable loop -dataKeys1 = {k: k for k in data} # {"key1": "key1", "key2": "key2"} -dataValues1 = {k: data[k] for k in data} # {"key1": "value1", "key2": "value2"} -# Double variable loop -dataKeys2 = {k: k for k, v in data} # {"key1": "key1", "key2": "key2"} -dataValues2 = {v: v for k, v in data} # {"value1": "value1", "value2": "value2"} -dataFilter = {k: v for k, v in data if k == "key1" and v == "value1"} # {"key1": "value1"} -# Use `_` to ignore loop variables -dataKeys3 = {k: k for k, _ in data} # {"key1": "key1", "key2": "key2"} -dataValues3 = {v: v for _, v in data} # {"value1": "value1", "value2": "value2"} -``` - -As with a `for` loop, the loop variables may exploit compound assignment: - -```python -[x * y + z for [x, y], z in [[[2, 3], 5], [["o", 2], "!"]] # [11, 'oo!'] -``` - -KCL does not accept an un-parenthesized list as the operand of a for clause: - -```python -[x * x for x in 1, 2, 3] # parse error: unexpected comma -``` - -Comprehensions defines a new lexical block, so assignments to loop variables have no effect on variables of the same name in an enclosing block: - -```python -x = 1 -_ = [x for x in [2]] # new variable x is local to the comprehension -print(x) # 1 -``` - -The operand of a comprehension's first clause (always a for) is resolved in the lexical block enclosing the comprehension. In the examples below, identifiers referring to the outer variable named x have been distinguished by subscript. - -```python -x0 = [1, 2, 3] -[x * x for x in x0] # [1, 4, 9] -[x * x for x in x0 if x % 2 == 0] # [4] -``` - -All subsequent for and if expressions are resolved within the comprehension's lexical block, as in this rather obscure example: - -```python -x0 = [[1, 2], [3, 4], [5, 6]] -[x * x for x in x0 for x in x if x % 2 == 0] # [4, 16, 36] -``` - -which would be more clearly rewritten as: - -```python -x = [[1, 2], [3, 4], [5, 6]] -[z * z for y in x for z in y if z % 2 == 0] # [4, 16, 36] -``` - -### Conditional Expressions - -A conditional expression has the form `a if cond else b`. It first evaluates the condition `cond`. If it's true, it evaluates `a` and yields its value; otherwise it yields the value of `b`. - -Syntax: - -``` -if_expr: test "if" test "else" test -``` - -Examples: - -```python -x = True if enabled else False # if enabled is -``` - -### Unary Expressions - -In KCL, supported unary operators are `+`, `-`, `~`, and `not`. - -Syntax: - -``` -unary_expr: ("+" | "-" | "~") primary_expr - | "not" test -``` - -Usage: - -``` -+ number unary positive (int, float) -- number unary negation (int, float) -~ number unary bitwise inversion (int) -not x logical negation (any type) -``` - -The `+` and `-` operators may be applied to any number (int or float) and return the number. -The `not` operator returns the negation of the truth value of its operand. - -Examples: - -```python -~1 # -2 -~-1 # 0 -~0 # -1 -not True # False -not 0 # True -``` - -### Binary Expressions - -In KCL, binary expressions consist of `comparisons`, `logical operations`, `arithmetic operations` and `membership tests`. - -Syntax: - -``` -binary_expr: test bin_op test -bin_op: 'or' - | 'and' - | '==' | '!=' | '<' | '>' | '<=' | '>=' - | 'in' | 'not' 'in' - | '|' - | '^' - | '&' - | '-' | '+' - | '*' | '%' | '/' | '//' - | '<<' | '>>' -``` - -#### Logical Operations - -The `or` and `and` operators yield the logical disjunction and conjunction of their arguments, which need not be Booleans. 
- -The expression `x or y` yields the value of `x` if its truth value is `True`, or the value of `y` otherwise. - -```python -False or False # False -False or True # True -True or True # True -1 or "hello" # 1 -``` - -Similarly, `x` and `y` yields the value of `x` if its truth value is `False`, or the value of `y` otherwise. - -```python -False and False # False -False and True # False -True and True # True -1 and "hello" # "hello" -``` - -These operators use "short circuit" evaluation, so the second expression is not evaluated if the value of the first expression has already determined the result, allowing constructions like these: - -```python -x and x[0] == 1 # x[0] is not evaluated if x is empty -len(x) == 0 or x[0] == "" -not x or not x[0] -``` - -#### Comparisons - -The `==` operator reports whether its operands are equal; the `!=` operator is its negation. - -The operators `<`, `>`, `<=`, and `>=` perform an ordered comparison of their operands. It is an error to apply these operators to operands of unequal type, unless one of the operands is an `int` and the other is a `float`. Of the built-in types, only the following support ordered comparison, using the ordering relation shown: - -``` -NoneType # None <= None -bool # False < True -int # mathematical -float # as defined by IEEE 754 -string # lexicographical -list # lexicographical -``` - -Comparison of floating-point values follows the IEEE 754 standard, which breaks several mathematical identities. For example, if `x` is a `NaN` value, the comparisons `x < y`, `x == y`, and `x > y` all yield false for all values of `y`. - -The remaining built-in types support only equality comparisons. Values of type `dict` and `schema` compare equal if their elements compare equal, and values of type function or `builtin_function_or_method` are equal only to themselves. - -``` -dict # equal contents -schema # equal exported-attributes -function # identity -builtin_function_or_method # identity -``` - -#### Arithmetic Operations - -The following table summarizes the binary arithmetic operations available for built-in types: - -``` -Arithmetic (int or float; result has type float unless both operands have type int) - number + number # addition - number - number # subtraction - number * number # multiplication - number / number # real division (result is always a float) - number // number # floored division - number % number # remainder of floored division - number ^ number # bitwise XOR - number << number # bitwise left shift - number >> number # bitwise right shift - -Concatenation - string + string - list + list - -Repetition (string/list) - int * sequence - sequence * int - -Union - int | int - list | list - dict | dict - schema | schema - schema | dict -basictype | basictype -``` - -The operands of the arithmetic operators `+`, `-`, `*`, `//`, and `%` must both be numbers (`int` or `float`) but need not have the same type. The type of the result has type `int` only if both operands have that type. The result of real division / always has type `float`. - -The `+` operator may be applied to non-numeric operands of the same type, such as two lists, or two strings, in which case it computes the concatenation of the two operands and yields a new value of the same type. 
- -```python -"Hello, " + "world" # "Hello, world" -[1, 2] + [3, 4] # [1, 2, 3, 4] -``` - -The `*` operator may be applied to an integer n and a value of type `string`, `list`, in which case it yields a new value of the same sequence type consisting of n repetitions of the original sequence. The order of the operands is immaterial. Negative values of n behave like zero. - -```python -'mur' * 2 # 'murmur' -3 * range(3) # [0, 1, 2, 0, 1, 2, 0, 1, 2] -``` - -The `&` operator requires two operands of the same type, such as `int`. For integers, it yields the bitwise intersection (AND) of its operands. - -The `|` operator likewise computes bitwise, unions basic types and unions collection and schema data, such as **list**, **dict** and **schema**. - -Computing bitwise examples: - -```python -0x12345678 | 0xFF # 0x123456FF -``` - -Unioning basic types examples: - -```python -schema x: - a: int | str # attribute a could be a int or string -``` - -A union type is used to define a schema attribute type. See more details in **schema** spec. -Supported types in a union type are `int`, `str`, `float`, `bool`, `list` and `dict`. - -Unioning collection and schema data: - -- Unioning List. Overwrite the list expression on the right side of the operator `|` to the list variable on the left side of the operator one by one according to the **index**. - -```python -_a = [1, 2, 3] -_b = [4, 5, 6, 7] -x = _a | _b # [4, 5, 6, 7] 4 -> 1; 5 -> 2; 6 -> 3; 7 -> None -``` - -Unioning to the specific index or all elements is still under discussion. - -- Unioning Dict. Union the dict expression on the right side of the operator `|` one by one to the dict variable on the left side of the operator according to the **key** - -```python -_a = {key1 = "value1"} -_b = {key1 = "overwrite", key2 = "value2"} -_c = _a | _b # {"key1": "overwrite", "key2": "value2"} -``` - -The union of collection and schema is a new one whose attributes are unioning b to a, preserving the order of the attributes of the operands, left before right. - -Unioning to the specific key or all keys is still under discussion. - -- Unioning Schema. - -The union operation for schema is similar to dict. - -Schema union could be done as: - -``` -schema Person: - firstName: str - lastName: str - -_a = Person { - firstName = "John" -} -_b = {lastName = "Doe"} -_a = _a | _b # {"firstName": "John", "lastName": "Doe"} -``` - -Unioning to a specific attribute is still under discussion. Unioning to all attributes is not applicable to schema instances. - -See **selector expression** in **expression** spec for more details. - -The `^` operator accepts operands of `int`. For integers, it yields the bitwise XOR (exclusive OR) of its operands. - -The `<<` and `>>` operators require operands of `int` type both. They shift the first operand to the left or right by the number of bits given by the second operand. It is a dynamic error if the second operand is negative. Implementations may impose a limit on the second operand of a left shift. - -```python -0x12345678 & 0xFF # 0x00000078 -0b01011101 ^ 0b110101101 # 0b111110000 -0b01011101 >> 2 # 0b010111 -0b01011101 << 2 # 0b0101110100 -``` - -#### Membership Tests - -Usage: - -``` - any in sequence (list, dict, schema, string) - any not in sequence -``` - -The `in` operator reports whether its first operand is a member of its second operand, which must be a list, dict, schema, or string. The `not in` operator is its negation. Both return a Boolean. 
- -The meaning of membership varies by the type of the second operand: the members of a list are its elements; the members of a dict are its keys; the members of a string are all its substrings. - -```python -1 in [1, 2, 3] # True - -d = {"one" = 1, "two" = 2} -"one" in d # True -"three" in d # False -1 in d # False -[] in d # False - -"nasty" in "dynasty" # True -"a" in "banana" # True -"f" not in "way" # True - -d = data {one = 1, two = 2} # data is a schema with attributes one and two -"one" in d # True -"three" in d # False -``` - -### Function Invocations - -KCL allows calling built-in functions and functions from built-in and system modules. Whether KCL allows defining new functions is under discussion. - -Syntax: - -``` -call_suffix: "(" [arguments [","]] ")" -arguments: argument ("," argument)* -argument: test | identifier "=" test | "*" test | "**" test -``` - -To call a function, the basic way is shown as the following code excerpt: - -```python -print("An argument") - -import math -# 2 powers 3 is 8. -a = math.pow(2, 3) -``` - -As you can see, arguments are separated with `,`. Arguments can only be passed in this way. KCL supports positional arguments and key-value arguments. - -Note that: - -- Some functions have parameters with default values. -- Some functions accept variadic arguments. - -When an argument is not supplied for a parameter without a default value, -an error will be reported. - -### Selector Expressions - -A selector expression selects the attribute or method of the value. - -#### Select attribute - -Syntax: - -``` -select_suffix: ["?"] "." identifier -``` - -KCL provides a wealth of ways to identify or filter attributes. - -x.y - -- schema: it denotes the attribute value of a schema `x` identified by `y` -- package: it denotes the identifier of a package `x` identified by `y` - -Examples: - -``` -schema Person: - name: str - age: int - -person = Person { - name = "Alice" - age = 18 -} -name = person.name # "Alice" -age = person.age # 18 -``` - -x?.y - -If the x if None/Undefined or empty(empty list or dict), just return None, otherwise behave as x.y. - -Examples - -``` -noneData = None -data?.name # None - -emptyDict = {} -emptyDict?.name # None - -emptyList = [] -emptyList?[0] # None -``` - -As a supplementary of the `selector` expression in KCL code, the KCL compiler needs to provide corresponding identifying and filtering features through the command line and api form. - -#### Select method - -Syntax: - -``` -select_suffix: "." identifier -``` - -A `identifier` identifies method belongs to the built-in types `string`, `list`, `dict`, and `schema`. - -- A selector expression fails if the value does not have an attribute of the specified name. -- A selector expression that selects a method typically appears within a call expression, as in these examples: - -Examples: - -```python -["able", "baker", "charlie"].index("baker") # 1 -"banana".count("a") # 3 -"banana".reverse() # error: string has no .reverse field or method -Person.instances() # all instances of schema Person -``` - -But when not called immediately, the selector expression evaluates to a bound method, that is, a method coupled to a specific receiver value. A bound method can be called like an ordinary function, without a receiver argument: - -``` -f = "banana".count -f # -f("a") # 3 -f("n") # 2 -``` - -### Index expressions - -An index expression `a[i]` yields the `i` th element of an indexable type such as a string or list. 
The index `i` must be an `int` value in the range `-n` ≤ `i` < `n`, where `n` is `len(a)`; any other index results in an error. - -Syntax: - -``` -subscript_suffix: "[" [test] "]" -``` - -A valid negative index `i` behaves like the non-negative index `n+i`, allowing for convenient indexing relative to the end of the sequence. - -```python -"abc"[0] # "a" -"abc"[1] # "b" -"abc"[-1] # "c" - -["zero", "one", "two"][0] # "zero" -["zero", "one", "two"][1] # "one" -["zero", "one", "two"][-1] # "two" -``` - -An index expression `d[key]` may also be applied to a dictionary `d`, to obtain the value associated with the specified key. It returns `Undefined` if the dictionary contains no such key. - -An index expression appearing on the left side of an assignment causes the specified list or dictionary element to be updated: - -```python -a = range(3) # a == [0, 1, 2] -a[2] = 7 # a == [0, 1, 7] - -coins["suzie b"] = 100 -``` - -It is a dynamic error to attempt to update an element of an immutable type, such as a list or string, or a frozen value of a mutable type. - -### Slice expressions - -A slice expression `a[start:stop:stride]` yields a new value containing a sub-sequence of `a`, which must be a string, or list. - -``` -subscript_suffix: "[" [test] [":" [test] [":" [test]]] "]" -``` - -Each of the `start`, `stop`, and `stride` operands is optional; if present, and not `None`, each must be an integer. -The `stride` value defaults to 1. If the stride is not specified, the colon preceding it may be omitted too. It is an error to specify a stride of zero. - -Conceptually, these operands specify a sequence of values `i` starting at start and successively adding `stride` until `i` reaches or passes `stop`. The result consists of the concatenation of values of `a[i]` for which `i` is valid.` - -The effective start and stop indices are computed from the three operands as follows. Let `n` be the length of the sequence. - -**If the stride is positive**: If the `start` operand was omitted, it defaults to -infinity. If the `end` operand was omitted, it defaults to +infinity. For either operand, if a negative value was supplied, `n` is added to it. The `start` and `end` values are then "clamped" to the nearest value in the range 0 to `n`, inclusive. - -**If the stride is negative**: If the `start` operand was omitted, it defaults to +infinity. If the `end` operand was omitted, it defaults to -infinity. For either operand, if a negative value was supplied, `n` is added to it. The `start` and `end` values are then "clamped" to the nearest value in the range -1 to `n`-1, inclusive. - -```python -"abc"[1:] # "bc" (remove first element) -"abc"[:-1] # "ab" (remove last element) -"abc"[1:-1] # "b" (remove first and last element) -"banana"[1::2] # "aaa" (select alternate elements starting at index 1) -"banana"[4::-2] # "nnb" (select alternate elements in reverse, starting at index 4) -``` - -It's not allowed to define a slice expression as a left value in KCL. -Cause list and string are immutable, re-slicing can directly operate to operand to ensure better performance. 
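For illustration, a minimal sketch of this rule (the commented-out line shows what is rejected; rebuilding the list through slicing and concatenation is one possible workaround, not a construct prescribed by this spec):

```python
a = [0, 1, 2, 3, 4]
b = a[1:4]                   # [1, 2, 3] - a new list; `a` is unchanged
# a[1:4] = [9, 9]            # error: a slice expression cannot be a left value
c = a[:1] + [9, 9] + a[4:]   # [0, 9, 9, 4] - construct a new list instead
```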
diff --git a/docs/reference/lang/lang/spec/index.md b/docs/reference/lang/lang/spec/index.md deleted file mode 100644 index 88bbaa59..00000000 --- a/docs/reference/lang/lang/spec/index.md +++ /dev/null @@ -1 +0,0 @@ -# KCL Spec diff --git a/docs/reference/lang/lang/spec/kcl-spec.md b/docs/reference/lang/lang/spec/kcl-spec.md deleted file mode 100644 index 604c75be..00000000 --- a/docs/reference/lang/lang/spec/kcl-spec.md +++ /dev/null @@ -1,300 +0,0 @@ ---- -title: "KCL Spec" -linkTitle: "KCL Spec" -type: "docs" -weight: 2 -description: KCL Spec ---- -## Lexical rules - -### Keywords and reserved words - -The following are the keywords of the KCL: - -```python - True False None Undefined import - and or in is not - as if else elif for - schema mixin protocol check assert - all any map filter lambda - rule -``` - -The following are reserved words for the KCL: - -```python - pass return validate rule flow - def del raise except try - finally while from with yield - global nonlocal struct class final -``` - -### Line comment - -```python -# a comment -``` - -### Operators - -```python - + - * ** / // % - << >> & | ^ < > - ~ <= >= == != = - += -= *= **= /= //= %= - <<= >>= &= ^= -``` - -### Delimiters - -```python - ( ) [ ] { } - , : . ; @ -``` - -### Operator precedence - -The following list of operators is ordered from **highest to lowest**: - -| Operator | Description | -| ---------------------------------------------------------------- | -------------------------------------------------------- | -| `**` | Exponentiation (highest priority) | -| `+x` `-x` `~x` | Positive, negative, bitwise NOT | -| `*` `/` `%` `//` | Multiplication, division, floor division and remainder | -| `+` `-` | Addition and subtraction | -| `<<` `>>` | Left and right shifts | -| `&` | Bitwise AND | -| `^` | Bitwise XOR | -| `|` | Bitwise OR | -| `in`, `not in`, `is`, `is not`, `<`, `<=`, `>`, `>=`, `!=`, `==` | Comparisons, including membership and identity operators | -| `not` | Boolean NOT | -| `and` | Boolean AND | -| `or` | Boolean OR | -| `if – else` | Conditional expression = | -| `=`, `+=`, `-=`, `*=`, `/=`, `%=`, `&=`, `|=`, `^=`, `**=`, `//=`, `<<=`, `>>=` | Assign | - -## Grammar - -KCL uses Python's [LarkParser](https://lark-parser.readthedocs.io/en/latest/) tool to describe the grammar, and the specification rules are as follows: - -```bnf -// Copyright 2021 The KCL Authors. All rights reserved. - -//////////// KCL grammar //////////// -start: (NEWLINE | preamble_statement)* - -//////////// statement //////////// -preamble_statement: preamble_small_stmt | preamble_compound_stmt -preamble_small_stmt: (small_stmt | import_stmt) NEWLINE -preamble_compound_stmt: compound_stmt | schema_stmt -statement: small_stmt NEWLINE | compound_stmt -compound_stmt: if_stmt -small_stmt: assign_stmt | expr_stmt | assert_stmt - -//////////// import_stmt //////////// -import_stmt: IMPORT dot_name (AS NAME)? -dot_name: [leading_dots] identifier (DOT identifier)* -leading_dots: DOT+ - -/////////// assert_stmt //////////// -assert_stmt: ASSERT test (COMMA test)? - -//////////// if_stmt //////////// -if_stmt: IF test COLON suite (ELIF test COLON suite)* (ELSE COLON suite)? 
-suite: small_stmt NEWLINE | NEWLINE _INDENT statement+ _DEDENT - -//////////// assign_stmt //////////// -assign_stmt: primary_expr (ASSIGN primary_expr)* ASSIGN test - | primary_expr augassign test -augassign: COMP_PLUS | COMP_MINUS | COMP_MULTIPLY | COMP_DOUBLE_STAR | COMP_DIVIDE - | COMP_DOUBLE_DIVIDE | COMP_MOD | COMP_AND | COMP_OR | COMP_XOR | COMP_SHIFT_LEFT - | COMP_SHIFT_RIGHT - -//////////// schema_stmt //////////// -schema_stmt: [decorators] SCHEMA [RELAXED] identifier [LEFT_BRACKETS [arguments] RIGHT_BRACKETS] [LEFT_PARENTHESES operand_name RIGHT_PARENTHESES] COLON NEWLINE [schema_body] -schema_body: _INDENT (string NEWLINE)* [mixin_stmt] (schema_attribute_stmt | statement)* [check_block] _DEDENT -schema_attribute_stmt: [decorators] (FINAL)? identifier COLON type [(ASSIGN | augassign) test] NEWLINE - -/////////// decorators //////////// -decorators: (AT primary_expr NEWLINE)+ - -//////////// type //////////// -type: type_element (OR type_element)* -type_element: schema_type | basic_type | compound_type -schema_type: operand_name -basic_type: STRING_TYPE | INT_TYPE | FLOAT_TYPE | BOOL_TYPE -compound_type: list_type | dict_type -list_type: LEFT_BRACKETS (type)? RIGHT_BRACKETS -dict_type: LEFT_BRACE (type)? COLON (type)? RIGHT_BRACE - -//////////// check_block //////////// -check_block: CHECK COLON NEWLINE _INDENT check_expr+ _DEDENT -check_expr: check_test [COMMA primary_expr] NEWLINE -check_test: or_test [IF or_test] - -//////////// mixin_stmt //////////// -mixin_stmt: MIXIN LEFT_BRACKETS [mixins | multiline_mixins] RIGHT_BRACKETS NEWLINE -multiline_mixins: NEWLINE _INDENT mixins NEWLINE _DEDENT -mixins: operand_name (COMMA (NEWLINE mixins | operand_name))* - -//////////// expression_stmt //////////// -expr_stmt: expression -expression: test (COMMA test)* -test: if_expr | primary_expr | unary_expr | binary_expr -if_expr: test IF test ELSE test -unary_expr: (PLUS | MINUS | NOT) primary_expr | L_NOT test -binary_expr: test bin_op test -bin_op: L_OR - | L_AND - | EQUAL_TO | NOT_EQUAL_TO | LESS_THAN | GREATER_THAN | LESS_THAN_OR_EQUAL_TO | GREATER_THAN_OR_EQUAL_TO - | IN | L_NOT IN | IS | IS L_NOT - | OR - | XOR - | AND - | MINUS | PLUS - | MULTIPLY | MOD | DIVIDE | DOUBLE_DIVIDE - | SHIFT_LEFT | SHIFT_RIGHT - -primary_expr: operand | primary_expr select_suffix | primary_expr call_suffix | primary_expr subscript_suffix | primary_expr schema_expr -operand: operand_name | number | string - | TRUE | FALSE | NONE | list_expr | list_comp | dict_expr - | dict_comp | LEFT_PARENTHESES test RIGHT_PARENTHESES -operand_name: identifier | qualified_identifier - -select_suffix: DOT (identifier | dict_identifier_selector | list_identifier_selector) -dict_identifier_selector: MULTIPLY | LEFT_BRACE identifier (COMMA identifier)* RIGHT_BRACE -list_identifier_selector: subscript_suffix - -//////////// call_suffix //////////// -call_suffix: LEFT_PARENTHESES [arguments [COMMA]] RIGHT_PARENTHESES - -//////////// subscript_suffix //////////// -subscript_suffix: LEFT_BRACKETS (test | [test] COLON [test] [COLON [test]]) RIGHT_BRACKETS - -//////////// arguments //////////// -arguments: argument (COMMA argument)* -argument: test | NAME ASSIGN test | MULTIPLY test | DOUBLE_STAR test - - -//////////// operand //////////// -identifier: NAME -qualified_identifier: identifier DOT identifier - -//////////// list_expr //////////// -list_expr: LEFT_BRACKETS [list_items | NEWLINE _INDENT list_items _DEDENT] RIGHT_BRACKETS -list_items: list_item ((COMMA [NEWLINE] | NEWLINE) list_item)* [COMMA] [NEWLINE] -list_item: 
test | star_expr -list_comp: LEFT_BRACKETS (list_item comp_clause+ | NEWLINE _INDENT list_item comp_clause+ _DEDENT) RIGHT_BRACKETS -//////////// dict_expr //////////// -dict_expr: LEFT_BRACE [entries | NEWLINE _INDENT entries _DEDENT] RIGHT_BRACE -dict_comp: LEFT_BRACE (entry comp_clause+ | NEWLINE _INDENT entry comp_clause+ _DEDENT) RIGHT_BRACE -entries: entry ((COMMA [NEWLINE] | NEWLINE) entry)* [COMMA] [NEWLINE] -entry: test COLON test | double_star_expr -comp_clause: FOR loop_variables [COMMA] IN or_test [NEWLINE] [IF test [NEWLINE]] - -star_expr: MULTIPLY primary_expr -double_star_expr: DOUBLE_STAR primary_expr -loop_variables: primary_expr (COMMA primary_expr)* - -//////////// schema_expr //////////// -schema_expr: (LEFT_PARENTHESES [arguments] RIGHT_PARENTHESES)? dict_expr - -//////////// misc //////////// -number: DEC_NUMBER | HEX_NUMBER | BIN_NUMBER | OCT_NUMBER | FLOAT_NUMBER | IMAG_NUMBER -string: STRING | LONG_STRING - -// Tokens - -ASSIGN: "=" -COLON: ":" -SEMI_COLON: ";" -COMMA: "," -LEFT_PARENTHESES: "(" -RIGHT_PARENTHESES: ")" -LEFT_BRACKETS: "[" -RIGHT_BRACKETS: "]" -LEFT_BRACE: "{" -RIGHT_BRACE: "}" -PLUS: "+" -MINUS: "-" -MULTIPLY: "*" -DIVIDE: "/" -MOD: "%" -DOT: "." -AND: "&" -OR: "|" -XOR: "^" -NOT: "~" -LESS_THAN: "<" -GREATER_THAN: ">" -EQUAL_TO: "==" -NOT_EQUAL_TO: "!=" -GREATER_THAN_OR_EQUAL_TO: ">=" -LESS_THAN_OR_EQUAL_TO: "<=" -DOUBLE_STAR: "**" -DOUBLE_DIVIDE: "//" -SHIFT_LEFT: "<<" -SHIFT_RIGHT: ">>" -AT: "@" - -COMP_PLUS: "+=" -COMP_MINUS: "-=" -COMP_MULTIPLY: "*=" -COMP_DIVIDE: "/=" -COMP_MOD: "%=" -COMP_AND: "&=" -COMP_OR: "|=" -COMP_XOR: "^=" -COMP_DOUBLE_STAR: "**=" -COMP_DOUBLE_DIVIDE: "//=" -COMP_SHIFT_LEFT: "<<=" -COMP_SHIFT_RIGHT: ">>=" - -// Special tokens -IMPORT: "import" -AS: "as" -SCHEMA: "schema" -MIXIN: "mixin" -RELAXED: "relaxed" -CHECK: "check" -FOR: "for" -ASSERT: "assert" -IF: "if" -ELIF: "elif" -ELSE: "else" -L_OR: "or" -L_AND: "and" -L_NOT: "not" -IN: "in" -IS: "is" -FINAL: "final" -LAMBDA: "lambda" - -STRING_TYPE: "str" -INT_TYPE: "int" -FLOAT_TYPE: "float" -BOOL_TYPE: "bool" - -// Constant tokens -TRUE: "True" -FALSE: "False" -NONE: "None" - -NAME: /[a-zA-Z_]\w*/ -COMMENT: /#[^\n]*/ -NEWLINE: ( /\r?\n[\t ]*/ | COMMENT )+ - -STRING: /[ubf]?r?("(?!"").*?(? **note** -> -> Any character except the ASCII space, tab (`\t`) and formfeed (`\f`) is considered a none-space character. - -- A line ending in a backslash cannot carry a comment (, which will be introduced shortly afterwards). -- A backslash does not continue a comment. -- A backslash does not continue a token except for string literals (i.e., tokens other than string literals cannot be split across physical lines using a backslash). -- A backslash is illegal elsewhere on a line outside a string literal. - -### Implicit Line Joining - -Expressions in parentheses, square brackets or curly braces can be split over more than one physical line without using backslashes. - -- Implicitly continued lines can carry comments. -- The indentation of the continuation lines is not important. -- Blank continuation lines are allowed. -- There is no `NEWLINE` token between implicit continuation lines. -- Implicitly continued lines can also occur within triple-quoted strings (see below); in that case they cannot carry comments. - -### Blank Lines - -### Indentation - -### Comments - -Starting with a `#` character that is not part of a string literal is a comment. A comment ends at the end of the physical line. 
- -A comment signifies the end of the logical line unless the implicit line joining rules are invoked. - -Comments are ignored by the syntax. - -### Identifiers and Keywords - -Identifiers (also referred to as names) are described by the following lexical definitions. - -Within the ASCII range (from `U+0001` to `U+007F`), the valid characters for identifiers are the uppercase and lowercase letters `A` through `Z`, the underscore `_` and, except for the first character, the digits `0` through `9`. - -Identifiers are unlimited in length. The case is significant. - -### Keywords - -The following identifiers are used as reserved words, or keywords of the language, and cannot be used as ordinary identifiers. They must be spelled exactly as written here: - -``` -True False None Undefined import -and or in is not -as if else elif for -schema mixin protocol check assert -all any map filter final -lambda rule -``` - -The following tokens are not used, but they are reserved as possible future keywords: - -``` -pass return validate rule flow -def del raise except try -finally while from with yield -global nonlocal struct class -``` - -### Literals - -Literals are notations for constant values of some built-in types. - -### String Literals - -String literals are described by the following lexical definitions: - -``` -stringliteral ::= [stringprefix](shortstring | longstring) -stringprefix ::= "r" | "u" | "R" | "U" | "f" | "F" - | "fr" | "Fr" | "fR" | "FR" | "rf" | "rF" | "Rf" | "RF" -shortstring ::= "'" shortstringitem* "'" | '"' shortstringitem* '"' -longstring ::= "'''" longstringitem* "'''" | '"""' longstringitem* '"""' -shortstringitem ::= shortstringchar | stringescapeseq -longstringitem ::= longstringchar | stringescapeseq -shortstringchar ::= -longstringchar ::= -stringescapeseq ::= "\" -``` - -Multiple adjacent string or bytes literals (delimited by whitespace),possibly using different quoting conventions, are allowed, and their meaning is the same as their concatenation. - -### Numeric Literals - -There are two types of numeric literals: integers and floating-point numbers. - -Integer literals are described by the following lexical definitions: - -``` -integer ::= decinteger | bininteger | octinteger | hexinteger -decinteger ::= nonzerodigit (["_"] digit)* | "0"+ (["_"] "0")* -bininteger ::= "0" ("b" | "B") (["_"] bindigit)+ -octinteger ::= "0" ("o" | "O") (["_"] octdigit)+ -hexinteger ::= "0" ("x" | "X") (["_"] hexdigit)+ -nonzerodigit ::= "1"..."9" -digit ::= "0"..."9" -bindigit ::= "0" | "1" -octdigit ::= "0"..."7" -hexdigit ::= digit | "a"..."f" | "A"..."F" -``` - -Floating-point literals are described by the following lexical definitions: - -``` -floatnumber ::= pointfloat | exponentfloat -pointfloat ::= [digitpart] fraction | digitpart "." -exponentfloat ::= (digitpart | pointfloat) exponent -digitpart ::= digit (["_"] digit)* -fraction ::= "." digitpart -exponent ::= ("e" | "E") ["+" | "-"] digitpart -``` - -## Operators and Delimiters - -### Operators - -The following tokens are operators: - -``` -+ - * ** / // % -<< >> & | ^ < > -~ <= >= == != @ -``` - -### Delimiters - -The following tokens serve as delimiters in the grammar: - -``` -( ) [ ] { } -, : . ; = += --= *= **= /= //= %= -<<= >>= &= |= ^= -``` - -The period can also occur in floating-point literals. - -The following printing ASCII characters have special meaning as part of other tokens or are otherwise significant to the lexical analyzer: - -``` -' " # \ -``` - -The following printing ASCII characters are not used in KCL. 
Their occurrence outside string literals and comments is an unconditional error: - -``` -? ` -``` - -## Reference - -Since the lexical conventions of KCL is very similar to that of Python, we use the following document as the reference when writing this chapter. - -- [https://docs.python.org/3/reference/lexical_analysis.html](https://docs.python.org/3/reference/lexical_analysis.html) diff --git a/docs/reference/lang/lang/spec/modules.md b/docs/reference/lang/lang/spec/modules.md deleted file mode 100644 index 4c9bcbdf..00000000 --- a/docs/reference/lang/lang/spec/modules.md +++ /dev/null @@ -1,618 +0,0 @@ ---- -title: "Modules" -linkTitle: "Modules" -type: "docs" -weight: 2 -description: Modules ---- -## Modules and the Import System - -KCL code is organized in **modules**. For code in one module to access the code defined in another module, a process called **importing** must be used. - -Importing is undertaken at compile-time in KCL. The advantage is to have static checking enabled. - -A regular KCL module is a file on the file system. It is required to have a `.k` suffix. - -## Packages - -To help manage modules and provide a naming hierarchy, KCL has the concept of packages. In KCL, a package maps to exactly a file system directory, and a regular module maps to a file. - -Files directly under a package are considered parts of the package, instead of individual regular modules. - -Packages can have sub-packages. - -Packages are special modules: - -- All packages in KCL are modules. -- A single-file module can never be a package. - -All modules have a name. - -Sub package names are separated from their parent package name by dots. - -To summary, a regular KCL module is a `.k` file, and a package is a directory on the file system. All `.k` files directly under the directory are included in the package, other files are ignored. If the directory has subdirectories, they become sub-packages as long as there are `.k` files underneath. - -### Intra-Package Name Space Sharing - -Inside a package, all `.k` files are considered parts of the package, instead of regular modules. Code in these files share a single name space and can access names defined in other files, without explicitly granted. - -### Package Initialization - -A package can have the initialization code. The code must exist in only one of the `.k` files under this package. The interpreter guarantees that the initialization code is executed after all definitions. - -## Searching - -The searching begins when an `import` statement is used to import a module. - -### Module Cache - -In KCL, only standard system modules are cached. When a cached module is imported, the cached version is used. In other words, KCL runtime would not create another copy of the standard system module in memory. - -However, other modules are uncached. Importing a module multiple time would create multiple instances of the module. - -### Module Names - -An `import` statement specifies the name of the module to import. The syntax is: - -``` -import -``` - -The rule to search with the module name is very simple: - -- **Step 1**: Searches the module name from the **standard system modules**, then **plugins modules**. - - See **standard system modules** and **plugins modules** for more details. If matched, the module is imported. Otherwise, continue to **Step 2**. -- **Step 2**. Whether a module name starts with a `.` is checked. If yes, the name is a so-called relative pathname, and we go to **Step 5**. Otherwise, continue to **Step 3**. 
-- **Step 3**: If the module name does not start with any `.`, then the compiler searches the nearest `root path` directory from this directory to the parent, and find the module according to the name just from the `root path`. If no `root path` is found, find the module according to the name from the folder the `.k` file including this `import` statement exists. - - **root path**: the directory contains a `kcl.mod` file. If matched, the module is imported. Otherwise, continue to **Step 4**. -- **Step 4**: Then the compiler checks if the name is the name of any library module that requires explicit loading. If matched, the library module is imported. Otherwise, continue to **Step 6**. -- **Step 5**. For relative importing, find the module according to the name from the folder the `.k` file including this `import` statement exists. Interpret leading dots using the following rule: -- One dot: Ignore. -- Tow or more dots: Suppose there are `n` leading dots, then the searching starts at `n - 1` levels above this folder. If matched, the module is imported. Otherwise, continue to **Step 6**. -- **Step 6**. Module not found, report an error. - -Do case-sensitive search when the operating system allows. If case-sensitive search is not allowed, search directories before regular files. - -In KCL, the `from <> import <>` is unsupported, and relative import is performed with the `import <>` syntax. - -### Uniqueness of Module - -Each module has a unique location path in its scope, so that a module or package could be located with a unique location path, such as `a.b.c`. - -Searching by location path should be supported by the kcl compiler, which needs to provide corresponding searching features through the command line and api form. - -## Standard System Modules - -KCL supports a few standard system modules. The following is the full list of these standard system modules: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Module   | Member |
| -------- | ------ |
| datetime | today, now, ticks, date |
| math     | ceil, exp, expm1, factorial, floor, gcd, isfinite, isinf, isnan, log, log1p, log2, log10, modf, pow, sqrt |
| regex    | replace, match, compile, findall, search, split |
| units    | n, u, m, k, K, M, G, T, P, Ki, Mi, Gi, Ti, Pi, to_n, to_u, to_m, to_K, to_M, to_G, to_T, to_P, to_Ki, to_Mi, to_Gi, to_Ti, to_Pi |
| json     | encode, decode, dump_to_file |
| yaml     | encode, decode, dump_to_file |
| net      | split_host_port, join_host_port, fqdn, parse_IP, to_IP4, to_IP16, IP_string, is_IPv4, is_IP, is_loopback_IP, is_multicast_IP, is_interface_local_multicast_IP, is_link_local_multicast_IP, is_link_local_unicast_IP, is_global_unicast_IP, is_unspecified_IP |
| base64   | encode, decode |
| crypto   | md5, sha1, sha224, sha256, sha384, sha512 |
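
Before the per-module details that follow, a minimal sketch of importing and calling two of the listed members (the result comments assume the member behaviour described later in this section):

```python
import math
import regex

a = math.floor(2.7)            # 2
b = regex.match("abc", "a.c")  # True: the pattern matches at the start of the string
```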
- -- datetime - - ticks() -> float - Return the current time in seconds since the Epoch. Fractions of a second may be present if the system clock provides them. - - date() -> str - return the `%Y-%m-%d %H:%M:%S` format date. - - now() -> str - return the local time. e.g. `'Sat Jun 06 16:26:11 1998'` - - today() -> str - return the `%Y-%m-%d %H:%M:%S.%{ticks}` format date. -- math - - ceil(x) -> int - Return the ceiling of x as an Integral. This is the smallest integer >= x. - - factorial(x) -> int - Return x!. Raise a error if x is negative or non-integral. - - floor(x) -> int - Return the floor of x as an Integral. This is the largest integer <= x. - - gcd(a: int, b: int) -> int - Return the greatest common divisor of x and y - - isfinite(x) -> bool - Return True if x is neither an infinity nor a NaN, and False otherwise. - - isinf(x) -> bool - Return True if x is a positive or negative infinity, and False otherwise. - - isnan(x) -> bool - Return True if x is a NaN (not a number), and False otherwise. - - modf(x) -> Listfloat, float] - Return the fractional and integer parts of x. Both results carry the sign of x and are floats. - - exp(x) -> float - Return e raised to the power of x. - - expm1(x) -> float - Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x. - - log(x) -> float - Return the logarithm of x to the base e. - - log1p(x) -> float - Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero. - - log2(x) -> float - Return the base 2 logarithm of x. - - log10(x) -> float - Return the base 10 logarithm of x. - - pow(x, y) -> float - Return x**y (x to the power of y). - - sqrt(x) -> float - Return the square root of x. -- regex - - replace(string: str, pattern: str, replace: str, count=0) -> str - Return the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in string by the replacement. - - match(string: str, pattern: str) -> bool - Try to apply the pattern at the start of the string, returning a bool value True if any match was found, or False if no match was found. - - compile(pattern: str) -> bool - Compile a regular expression pattern, returning a bool value denoting whether the pattern is valid. - - findall(string: str, pattern: str) -> List[str] - Return a list of all non-overlapping matches in the string. - - search(string: str, pattern: str) -> bool - Scan through string looking for a match to the pattern, returning a bool value True if any match was found, or False if no match was found. - - split(string: str, pattern: str, maxsplit=0) -> List[str] - Scan through string looking for a match to the pattern, returning a Match object, or None if no match was found. -- units - - Unit constants - - Fixed point: `n`, `u`, `m`, `k`, `K`, `G`, `T` and `P`. - - Power of 2: `Ki`, `Mi`, `Gi`, `Ti` and `Pi`. 
- - Functions - - to_n(num: int) -> str - Int literal to string with `n` suffix - - to_u(num: int) -> str - Int literal to string with `u` suffix - - to_m(num: int) -> str - Int literal to string with `m` suffix - - to_K(num: int) -> str - Int literal to string with `K` suffix - - to_M(num: int) -> str - Int literal to string with `M` suffix - - to_G(num: int) -> str - Int literal to string with `G` suffix - - to_T(num: int) -> str - Int literal to string with `T` suffix - - to_P(num: int) -> str - Int literal to string with `P` suffix - - to_Ki(num: int) -> str - Int literal to string with `Ki` suffix - - to_Mi(num: int) -> str - Int literal to string with `Mi` suffix - - to_Gi(num: int) -> str - Int literal to string with `Gi` suffix - - to_Ti(num: int) -> str - Int literal to string with `Ti` suffix - - to_Pi(num: int) -> str - Int literal to string with `Pi` suffix -- json - - encode(data: any, sort_keys: bool = False, indent: int = None, ignore_private: bool = False, ignore_none: bool = False) -> str - Serialize a KCL object `data` to a JSON formatted str. - - decode(value: str) -> any - Deserialize `value` (a string instance containing a JSON document) to a KCL object. - - dump_to_file(data: any, filename: str, ignore_private: bool = False, ignore_none: bool = False) -> None - Serialize a KCL object `data` to a JSON formatted str and write it into the file `filename`. -- yaml - - encode(data: any, sort_keys: bool = False, ignore_private: bool = False, ignore_none: bool = False) -> str - Serialize a KCL object `data` to a YAML formatted str. - - decode(value: str) -> any - Deserialize `value` (a string instance containing a YAML document) to a KCL object. - - dump_to_file(data: any, filename: str, ignore_private: bool = False, ignore_none: bool = False) -> None - Serialize a KCL object `data` to a YAML formatted str and write it into the file `filename`. -- net - - split_host_port(ip_end_point: str) -> List[str] - Split the 'host' and 'port' from the ip end point. - - join_host_port(host, port) -> str - Merge the 'host' and 'port'. - - fqdn(name: str = '') -> str - Return Fully Qualified Domain Name (FQDN). - - parse_IP(ip) -> str - Parse 'ip' to a real IP address - - to_IP4(ip) -> str - Get the IP4 form of 'ip'. - - to_IP16(ip) -> int - Get the IP16 form of 'ip'. - - IP_string(ip: str | int) -> str - Get the IP string. - - is_IPv4(ip: str) -> bool - Whether 'ip' is a IPv4 one. - - is_IP(ip: str) -> bool - Whether ip is a valid ip address. - - is_loopback_IP(ip: str) -> bool - Whether 'ip' is a loopback one. - - is_multicast_IP(ip: str) -> bool - Whether 'ip' is a multicast one. - - is_interface_local_multicast_IP(ip: str) -> bool - Whether 'ip' is a interface, local and multicast one. - - is_link_local_multicast_IP(ip: str) -> bool - Whether 'ip' is a link local and multicast one. - - is_link_local_unicast_IP(ip: str) -> bool - Whether 'ip' is a link local and unicast one. - - is_global_unicast_IP(ip: str) -> bool - Whether 'ip' is a global and unicast one. - - is_unspecified_IP(ip: str) -> bool - Whether 'ip' is a unspecified one. -- base64 - - encode(value: str, encoding: str = "utf-8") -> str - Encode the string `value` using the codec registered for encoding. - - decode(value: str, encoding: str = "utf-8") -> str - Decode the string `value` using the codec registered for encoding. -- crypto - - md5(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `MD5` and the codec registered for encoding. 
- - sha1(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA1` and the codec registered for encoding. - - sha224(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA224` and the codec registered for encoding. - - sha256(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA256` and the codec registered for encoding. - - sha384(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA384` and the codec registered for encoding. - - sha512(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA512` and the codec registered for encoding. - -### The Built-in System Module - -KCL provides a list of built-in system modules, which are loaded automatically and can be directly used without providing any module name. For example, `print` is a widely used built-in module. - -The following is the full list of these built-in system modules: - -- print() - - The print function. -- multiplyof(a, b) - - Check if the modular result of a and b is 0 -- isunique(inval) - - Check if a list has duplicated elements -- len(inval) - Return the length of a value -- abs(x) - Return the absolute value of the argument. -- all(iterable) - Return True if bool(x) is True for all values x in the iterable. If the iterable is empty, return True. -- any(iterable) - Return True if bool(x) is True for any x in the iterable. If the iterable is empty, return False. -- bin(number) - Return the binary representation of an integer. -- hex(number) - Return the hexadecimal representation of an integer. -- oct(number) - Return the octal representation of an integer. -- ord(c) -> int - Return the Unicode code point for a one-character string. -- sorted(iterable) - Return a new list containing all items from the iterable in ascending order. A custom key function can be supplied to customize the sort order, and the reverse flag can be set to request the result in descending order. -- range(start, end, step=1) - Return the range of a value with start, end and step parameter. -- min(iterable) - With a single iterable argument, return its smallest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. With two or more arguments, return the smallest argument. -- max(iterable) - With a single iterable argument, return its biggest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. With two or more arguments, return the largest argument. -- sum(iterable, start) - Return the sum of a 'start' value (default: 0) plus an iterable of numbers. When the iterable is empty, return the start value. This function is intended specifically for use with numeric values and may reject non-numeric types. -- pow(x, y, z) - Equivalent to `x**y` (with two arguments) or `x**y % z` (with three arguments). Some types, such as ints, are able to use a more efficient algorithm when invoked using the three argument form. -- round(number, ndigits) - Round a number to a given precision in decimal digits. The return value is an integer if ndigits is omitted or None. Otherwise the return value has the same type as the number. ndigits may be negative. -- typeof(x: any, *, full_name: bool = False) -> str - Return the type of the value 'x' at runtime. When the 'full_name' is 'True', return the full package type name such as `pkg.schema`. 
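As a brief illustration, a hedged sketch using a few of these built-ins (the result comments follow the descriptions above; the exact string returned by `typeof` is an assumption here):

```python
a = len([1, 2, 3])        # 3
b = sorted([3, 1, 2])     # [1, 2, 3]
c = sum([1, 2, 3], 10)    # 16 (start value 10 plus the sum of the list)
d = max([4, 9, 2])        # 9
e = typeof(1.5)           # assumed to be "float" (the runtime type name)
```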
- -### Plugin Modules - -KCL compiler needs to provide the ability to dynamically expand and load plugin modules without modifying the compiler itself. KCL compiler needs to support flexible pluggable module extension mechanism, so that KCL users can use more abundant built-in function capabilities to simplify writing. - -KCL compiler needs to ensure the stability and safety of the expansion mechanism, without affecting the core of the compiler. - -Searching extended plugin module is performed after the standard system module. The standard system module has a higher priority in naming. If it exists a standard or built-in system module with the same name, the extended plugin module will be ignored. - -Importing and using the extended plugin module should be consistent with the standard or built-in system module. - -### Replacing Standard System Modules - -Replacing standard system modules is not allowed. - -## Examples - -We show more module features through an example. - -Suppose we have the following directories and files: - -``` - . - ├── mod1.k - ├── mod2.k - ├── pkg1 - │   ├── def1.k - │   ├── def2.k - │   └── def3init.k - └── pkg2 - ├── file2.k - └── subpkg3 - └── file3.k -``` - -From the structure we can see that `pkg1` and `pkg2` are two packages, `subpkg3` is a subpackage of `pkg2`, and `mod1.k` and `mod2.k` are regular modules. - -### Importing a Standard System Module - -The following statement can import the standard system module `math` - -```python -import math -``` - -This is the only way to import a standard system module. After importing a standard system module, functions, variables and schemas defined in it can be used. For example, the following statement uses the `log10` function -defined in `math` - -```python -a = math.log10(100) # a is 2 after computation. -``` - -### Importing a Regular Module - -In `mod1.k`, we can import `mod2` using one of the following syntaxes. - -```python -import mod2 -``` - -```python -import .mod2 -``` - -The difference is that in the first syntax, the KCL compiler will first try to check if `mod2` matches any of the standard system modules' name. Since it does not match any standard system module's name, the statement will check the directory where `mod1.k` resists in, like what the second statement does. - -Suppose in `mod2.k` there is a definition of a variable:: - -```python -a = 100 -``` - -After importing `mod2`, we can access `a` in `mod1.k` using the following syntax - -```python -b = mod2.a -``` - -### Importing a Package - -In `mod1.k`, we can import `pkg1` using one of the following syntaxes. - -```python -import pkg1 -``` - -```python -import .pkg1 -``` - -The difference is that in the first syntax, the KCL compiler will first try to check if `pkg1` matches any of the standard system modules' name. Since it does not match any standard system module's name, the statement will check the directory where `mod1.k` resists in, like what the second statement does. - -We can use similar statements to import `pkg2`. Note that importing `pkg2` will not import `subpkg3`. - -The name of the package is the name of the imported module. - -Suppose in `file2.k` that is inside `pkg2` there is a definition to variable `foo` - -```python -foo = 100 -``` - -This variable can be used in `mod1.k` after importing `pkg2` like the following - -```python -bar = pkg2.foo -``` - -### Importing a Subpackage - -To import `subpkg3` from `mod1.k`, one of the following statements can be used. 
- -```python -import pkg2.subpkg3 -``` - -```python -import .pkg2.subpkg3 -``` - -The behaviors of these statements are identical. - -The name of the subpackage is the name of the imported module. - -Suppose in `file3.k` that is inside `subpkg3` there is a definition to variable `foo` - -```python -foo = 100 -``` - -This variable can be used in `mod1.k` after importing `subpkg3` like the following - -```python -bar = subpkg3.foo -``` - -### Relative Importing - -Relative importing is useful when there is code trying to import modules that does not exist recursively inside the current directory. - -For example, the following statements, if written in `file3.k`, can be used to import `pkg2`, `pkg1` and `mod2` respectively. - -```python -import ...pkg2 # Go two levels up then import pkg2 -import ...pkg1 # Go two levels up then import pkg1 -import ...mod2 # Go two levels up then import mod2 -``` - -### Importing from a Root Path - -Suppose we have a `kcl.mod` file in the directory to mark it as a root path, then we have the following files: - -``` - . - |── kcl.mod - ├── mod1.k - ├── mod2.k - ├── pkg1 - │   ├── def1.k - │   ├── def2.k - │   └── def3init.k - └── pkg2 - ├── file2.k - └── subpkg3 - └── file3.k -``` - -In `pkg1` `def1.k`, we can import `pkg2.subpkg3` `file3` using the following syntaxes. - -```python -import pkg2.subpkg3.file3 -``` - -Importing from the root path is very convenient when the code is trying to import modules from a directory needs to look up multiple directories above this directory. At also, it is helpful to organize a large number of files in a root directory. - -### Importing a Module Inside a Package - -Note that `subpkg3` is only implemented with one file `file3.k`. The file can be regarded as a regular module and imported directly. - -In `mod1.k`, the importing statement would be:: - -```python -import pkg2.subpkg3.file3 -``` - -Different from importing `subpkg3`, now the name of the module is `file3`. We can access the variable `foo` defined in this module with the following -statement - -```python -bar = file3.foo -``` - -### Precedence of Importing - -When an import statement specifies a package to import, the virtual machine first looks for a directory named according to the import statement in the file system. - -If such a directory is not found, the virtual machine looks for a single file module. - -For example, when the statement `import a.b.c` appears, the virtual machine first looks for the directory `a/b/c` from the directory of the current file. If `a/b/c` is not found, the virtual machine looks for a file named `a/b/c.k`. If the file is also absent, an error is reported. - -### Package Implemented with Multiple Files - -Package `pkg1` is implemented with multiple KCL files. - -Multiple files can be used to define variables, schemas and functions, and they can access names defined in other files of this package. - -For example, suppose `def1.k` defines a variable `foo`, `def2.k` defines `bar`, and `def3init.k` defines a variable `baz`, when `pkg1` is imported by `mod1.k`, all these variable can be used - -```python -import pkg1 -a = pkg1.foo + pkg1.bar + pkg1.baz -``` - -Inside a module, names defined in a file can be accessed in another file without further importing. 
For example, suppose `bar` in `def2.k` would invoke `foo` defined in `def1.k`, it can directly use `foo` like the following - -```python -bar = foo + 1 -``` diff --git a/docs/reference/lang/lang/spec/schema.md b/docs/reference/lang/lang/spec/schema.md deleted file mode 100644 index 43a67761..00000000 --- a/docs/reference/lang/lang/spec/schema.md +++ /dev/null @@ -1,915 +0,0 @@ ---- -title: "Schema" -linkTitle: "Schema" -type: "docs" -weight: 2 -description: Schema ---- -## Syntax - -### Schema Definition - -A schema is a language element to define a type of configuration data. - -To define a schema, the syntax is the following: - -``` -schema_stmt: [decorators] "schema" ["relaxed"] identifier ["[" [arguments] "]"] ["(" operand_name ")"] ":" NEWLINE [schema_body] -schema_body: _INDENT (string NEWLINE)* [mixin_stmt] (schema_attribute_stmt | schema_index_signature | statement)* [check_block] _DEDENT -``` - -Attributes could be defined in a schema, the syntax is the following: - -``` -schema_attribute_stmt: [decorators] identifier ["?"] ":" type [(ASSIGN | augassign) test] NEWLINE -``` - -Index signature could be defined in a schema, the syntax is the following: - -``` -schema_index_signature: LEFT_BRACKETS [NAME COLON] [ELLIPSIS] basic_type RIGHT_BRACKETS COLON type [ASSIGN test] NEWLINE -``` - -Once defined, an attribute must have a valid type: - -``` -type: type_element ("|" type_element)* -type_element: schema_type | basic_type | list_type | dict_type -schema_type: operand_name -basic_type: "str" | "int" | "float" | "bool" -list_type: "[" (type)? "]" -dict_type: "{" (type)? COLON (type)? "}" -``` - -The followings are some basic examples: - -```python -# A person has a first name, a last name and an age. -schema person: - firstName: str - lastName: str - # fullName is generated by firstName and lastName - fullName: str = firstName + ' ' + lastName - # The default value of age is 0 - age: int = 0 - -# An employee IS a person, and has some additional information. -schema employee(person): - bankCard: int - nationality: str - -# A company has a name and many employees. -schema company: - name: str - employees: [employee] -``` - -More complex schema definitions will be elaborated after other concepts are -introduced. - -#### Optional Attribute - -Each attribute **must** be assigned with a not-None value as a schema instance unless it is modified by a question mark as an optional attribute. - -Examples: - -``` -schema employee(person): - bankCard?: int # bankCard is an optional attribute - nationality?: str # # nationality is an optional attribute -``` - -When there is an inheritance relationship: - -+ If the attribute is optional in the base schema, it could be optional or required in the sub-schema. -+ If the attribute is required in the base schema, it must be required in the sub-schema. - -### Configuration Definition - -A configuration is structured data stored in a dict-like structure. In KCL, we have introduced -the configuration definition syntax as a variant of dict definition syntax. - -``` -schema_expr: operand_name ("(" [arguments] ")")? dict_expr -``` - -As can be seen, apart from having an identifier as schema type, a configuration definition -is just an ordinary dict definition, and each key in the dict matches an attribute in the schema. - -To simplify configuration, schema attribute key is much easier to define as: - -- schema attribute key can be unquoted. When the attribute key has the same name as a variable, it must be quoted as a normal dict to avoid naming conflict. 
-- schema attribute key can be defined nested through `select expression`, such as `a.b.c`. - -The comma at the end of each line can be omitted. - -For example, we can define a `person` named `John Doe` using the following statement: - -```python -johnDoe = person { - # In the result, 'lastName' appears later than 'firstName', according the schema - lastName = 'Doe' - firstName = 'John' - # If we don't specify the 'age', the default value 0 is used. - # 'age': 20 -} -``` - -The result is a **dict**: - -```python -{ - 'firstName': 'John' - 'lastName': 'Doe' - 'age': 0 -} -``` - -Compared to the ordinary dict definition, a configuration definition has the following features: - -- Each attribute defined in the schema (or one of the schemas) could be configured, and config data has higher priority than the default value. -- When an attribute defined in the schema (or one of the schemas) is not configured in the configuration definition statement, and it has a default value, the default value is used. -- Unless the schema (or one of the schemas) is a **relaxed schema**, no more attributes can be defined. -- The quotation marks of dict key can be omitted. -- The comma at the end of each line can be omitted. -- Cases of **inheritance** will be discussed separately. - -For attributes of list, dict and schema types, the config data is added by **union** instead of reassignment. For instance: - -```python -schema Name: - firstName: str - lastName: str - -schema Person: - name: Name = { - firstNam = "John" - lastName = "default" - } - -JohnDoe = Person { - name.lastName = "Doe" -} -``` - -The result is a **dict**: - -```python -{ - 'firstName': 'John' - 'lastName': 'Doe' -} -``` - -#### Attribute Identify - -Each key identifier in the configuration expr identifies an element or a range of elements in a schema. The key identifier may consist of multiple attribute identifiers, and each attribute may be a basic type value, a list, a dict or schema. For example, the key identifier 'a.b.c' identifies the element 'c' in the 'A' schema: - -``` - -schema C: - c: int - -schema B: - b: C - -schema A: - a: B - -A { - a.b.c: 5 -} -``` - -To make the key identifier usage rules as clear as possible, we define the way of identifying with complex data types as follows. - -##### List - -Suppose we have a list attribute a. - -Identify an element in a: - -``` -a[0] # the first element -a[3] # the 4th element -a[-1] # the last element -a[-2] # the penultimate element -``` - -Identify a range of elements in the list: - -``` -a[2:5] # a slice of the third, 4th, and 5th elements -a[:5] # a slice of the first to 5th elements -``` - -#### Attribute Operator - -Once we identified the element(s), we can declare operation on it. It follows the pattern of `identifier op E`. - -#### Union - -Pattern: `identifier : E` - -The value of the expression `E` will be unioned into the element value. - -Examples: - -``` -a = A { - # union {d:4} into the element b.c, suppose c is a schema with an int type attribute d. - b.c : { - d : 4 - } -} -``` - -See 'union' in `expressions` spec for more details. - -#### Override - -Pattern: `identifier = E` - -The value of the expression `E` will override the element value. - -Examples: - -``` -a = A { - # override {c:4} to the element b, suppose b is a schema with an int type attribute c. - b = { - c: 4 - } -} -``` - -Unlike union, the override operation will reassign the element with a brand new value. -For basic type value, `union` and `override` have equivalent effects. 
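As a minimal sketch contrasting the two operators (the schema and attribute names are illustrative, and the comments assume the union and override rules as described above):

```python
schema Config:
    data: {str:int} = {a = 1, b = 2}

unionCase = Config {
    data: {a = 3}     # `:` unions {a = 3} into the default value by key
}
overrideCase = Config {
    data = {a = 3}    # `=` reassigns the attribute to the brand new value {a = 3}
}
```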
- -Note: - -+ Especially, we can "delete" its content by overriding the element to `Undefined`, such as `{ a = Undefined }`. - -#### Insert - -Pattern: `identifier += E` -Insert only works for list type `identifier`. - -List `E` will be inserted just after the specified index of the list `identifier`, and the following elements after the index will be automatically shifted. - -Examples: - -``` -a = A { - # insert {c:4} to the `index=2` position(just after index=1), suppose b is a list of schema with an int type attribute c. - b[1] += { - c: 4 - } -} -``` - -If no index is specified, the last index will be used. - -The type of 'E' must be compatible with the type of list. See `types` for more details. - -#### Index Signature - -Index signatures can be defined in the KCL schema, and it means that the key-value constraints of the index signature can be used to construct a dict with the schema type, or additional checks can be added to the relaxed schema attributes to enhance the KCL type and semantic checks. - -- Use the form `[{attr_alias}: {key_type}]: {value_type}` to define an index signature in the schema, and `{attr_alias}` can be omitted. - -```python -schema Map: - """ - Map is a relaxed schema with a key of str type and a value of str type - """ - [str]: str # `{attr_alias}` can be omitted. - -data = Map { - key1 = "value1" - key2 = "value2" -} -``` - -- Mandatory all attributes of the schema key and value types - -```python -schema Person: - name: str - age: int # error, conflicts with the index signature definition `[str]: str` - [str]: str # The values of all attributes of the schema can only be strings -``` - -- Mandatory all attribute key and value types are defined in the schema, which is equivalent to restricting all attribute types except the relaxed attributes. - -```python -schema Person: - name: str - age: int - [...str]: str # Except for the `name` and `age` attributes, the key type of all other attributes of the schema must be `str`, and the value type must also be `str`. -``` - -- Define the index signature attribute alias and use it with the check block. - -```python -schema Data: - [dataName: str]: str - check: - dataName in ["Alice", "Bob", "John"] - -data = Data { - Alice = "10" - Bob = "12" - Jonn = "8" # error Jonn not in ["Alice", "Bob", "John"] -} -``` - -```python -import regex - -schema DataMap: - [attr: str]: str - check: - regex.match(attr, r'[-._a-zA-Z0-9]+') - -data = DataMap { - key1 = "value1" - "foo.bar" = "value2" # check error -} -``` - -### Schema Context - -The schema definition space can be regarded as a separate function context. - -Init statement could be defined inside the schema, the syntax is the following: - -``` -statement: small_stmt NEWLINE | if_stmt -``` - -The following is an example: - -```python -schema Person: - firstName: str = "John" - lastName: str - # fullName is generated by firstName and lastName in a separate init statement - fullName: str = firstName + ' ' + lastName - -JohnDoe = Person { - lastName = "Doe" -} -``` - -The result is a **dict**: - -```python -{ - 'firstName': 'John' - 'lastName': 'Doe' - 'fullName': 'John Doe' -} -``` - -If statement, expr statement and assert statement are supported as a schema init -statement. See more in statement spec. - -+ The attributes must be defined first, including inherited ones, and then used in the init statement. -+ Statements in the schema context will be executed sequentially. -+ The value of attributes referenced in the init statement will be evaluated at runtime. 
- See the **Configuration Definition** section for the assignment rules of non-referenced attributes. For example, `"fullName"` in Person is generated by `"firstName"` and `"lastName"` evaluated at runtime, in which firstName is 'John', and lastName is "Doe". - -The immutability of attributes in the schema context follows the same rules as the immutability of global variables: - -```python -schema Person: - age: int = 1 # Immutable attribute - _name: str = "Alice" # Mutable attribute - - age = 10 # Error - _name = "Bob" # Ok -``` - -#### Arguments - -Schema context can also have arguments. The following is an example. - -```python -schema Person[separator]: - firstName: str = "John" - lastName: str - fullName: str = firstName + separator + lastName - -JohnDoe = Person('_') { - lastName = "Doe" -} -``` - -The example is similar to the previous one, except that the separator character used in -the `"fullName"` member is passed in as an argument. The way to perform a schema generation -when the schema has an initialization function with arguments is demonstrated in the code. - -### Check Block - -Optionally, a check block can be added to a schema definition to allow -additional checking to be performed. - -The syntax is the following: - -``` -check_block: "check" ":" NEWLINE _INDENT check_expr+ _DEDENT -check_expr: test (IF test)? [":" primary_expr] NEWLINE -``` - -In terms of grammatical definition, a check block consists of a list of conditional expressions. The following is an example: - -```python -schema employee(person): - bankCard: int - gender: str - - check: - len(str(bankCard)) == 16 - gender in ['male', 'female'], "The gender {} is unsupported".format(gender) -``` - -The ability of KCL check expressions covers the abilities that can be defined by OpenAPI spec and is aligned with the ability of logical expressions. We consider further aligning the syntax with `CEL` spec. -Whether to support `lambda expressions` is still under discussion. - -Summary: - -- A check block consists of one or more logical **expressions**. -- When defining a configuration, the expressions in the check block are evaluated - in any order. If any of the expression is `False`, an error is reported. -- A custom error message can be provided after an expression. - -### Specifying Types - -Optionally, the type of any member of a schema can be specified. As previous examples have shown. - -A member can be of a basic type, such as a string (`str`), a floating-point number (`float`), a fixed-point number (`int`) or a boolean number (`bool`). - -A member can also be of a dictionary generated from another schema. In such a case, the name of the other schema is used as the type name. - -A member can also be a list or an ordinary dict: - -- A list with unspecified type of elements is `[]`. -- A list with elements of type `t` is `[t]`. Here `t` is another type. -- A dict with keys of type `kt` and values of type `vt` is `{kt:vt}`. -- `kt`, `vt` or both of them can be missing, like a list with unspecified type of elements. - -The followings are some more examples: - -- A list of lists of strings: `[[str]]`. -- A dict of keys with the type string and unspecified value types: `{str:}`. - -A member can be a **union type** defined by `|`, such as `a | b`, which means the type of the member could be a or b. - -A union type can include types of `int`, `str`, `float`, `bool`, `list` and `dict` and support type nesting e.g. `{str:str|int}` and `[[int|str]|str|float]`, etc. 
- -Examples: - -```python -schema x: - p: int | str # p could be defined as a int or string -``` - -### Immutability - -KCL pursues strict immutability of schema attributes. It's generally followed the rules: - -- For the attributes of the basic type, such as string, int and float, it's allowed to be reassigned - through the init statement in **schema context** or by the **configuration definition**. -- For the attributes of list, dict and schema type, it's allowed to be reassigned only by the init statement in **schema context**. The content of it is allowed to be operated in **schema context** or by the **configuration definition**. -- Any other attempt to reassign or modify schema attribute will report an error. - -#### Assign by Value - -When using a schema variable to assign the value to another variable, we can only get a deep copy of its value, not a pointer or reference. That is, modifying the assigned value will not change the assigned schema variable. - -```python -schema Person: - name: str - -person = { - name = "Alice" -} -personCopy = person # 'personCopy' is a deep copy of 'person' and modifying 'personCopy' will not affect 'person' -``` - -### Union Operator - -For list, dict and schema, we can union delta to existing data. For example: - -```python -schema Name: - firstName: str - lastName: str - -schema Person: - name: Name = { - firstName = "John" - } - - # union a schema and a dict - name: Name { - lastName = "Doe" - } - -person = Person {} -``` - -The result is a **dict**: - -```python -{ - 'person': { - 'name': { - 'firstName': 'Jhon', - 'lastName': 'Doe' - } - } -} -``` - -### Other Operators - -Except for `assignment` and `union assignment`, it's not support other operators on schema type data. -Report an error if trying to use other operators on schema type data. - -### Deprecated - -The schema attribute can be marked as deprecated once it's considered invalid. - -```python -schema Person: - @deprecated(version="1.1.0", reason="use fullName instead", strict=True) - name: str - ... # Omitted contents - -person = Person { - # report an error on configing a deprecated attribute - name = "name" -} -``` - -- Deprecated attributes cannot be configured under any circumstances. Report an error or warning once the attribute is assigned. -- Define the expired version of the attribute through **version**, and define the reason for the attribute expired through **reason**. -- When strict is true, the attribute assignment will cause an error, otherwise it will report a warning and ignore the attribute assignment. - -### Composition - -The composition is a common way to define complex structures. KCL provides simplified means for the configuration definition of combined structures. - -Assuming we have the following schemas, which is defined by a combination of multiple schemas. - -```python -schema Name: - firstName: str - lastName: str - -schema Person: - name: Name - age: int - -schema Group: - name: str - persons: [Person] -``` - -To config a group: - -```python -group = Group { - name = "group" - persons = [{ - name = { - firstName = "John" - lastName = "Doe" - } - age = 24 - }] -} -``` - -- Top-level schema name is required to config a schema. -- The schema of the attributes in the schema can be omitted. - -Multi-level nested schemas will make the configuration verbose. KCL supports defining attributes in the schema through `selector expression`. 
The selector form is **x.y.z**, see the following example: - -```python -group = Group { - name = "group" - persons = [{ - name.firstName = "John" - name.lastName = "Doe" - age = 24 - }] -} -``` - -- Selector can be used to represent attribute in a schema - -### Inheritance - -Inheritance is an effective means to define a hierarchical structure definition, and KCL supports limited **single inheritance** of the schema. - -```python -schema Person: - firstName: str - lastName: str - -# schema Scholar inherits schema Person -schema Scholar(Person): - fullName: str = firstName + '_' + lastName - subject: str - -JohnDoe = Scholar { - firstName = "John", - lastName = "Doe", - subject = "CS" -} -``` - -The result is a **dict**: - -```python -{ - 'JohnDoe': { - 'firstName': 'John' - 'lastName': 'Doe' - 'fullName': 'John Doe' - 'subject': 'CS' - } -} -``` - -Each schema can be treated as a separated function context. Statements, including attribute statements and init statements, in the context of schemas will be evaluated from base schema to subschema according to the inheritance order. Each schema context is evaluated only once sequentially. The same goes for expressions in the check block. In the example, firstName and lastName are configured in the context of Person schema, and fullName is formed by splicing firstName and lastName in the context of Scholar schema. - -The default value can be modified in each schema. Value defined in **Configuration Definition** has a higher priority than the default value. Attributes with default values in any schema context ​​will eventually be unioned by configuration data. References to attributes in the schema context statements will use the value with unioned configuration data on evaluating at runtime. For example: - -```python -schema a: - x = 1 - y = x * 2 - -schema b(a): - x = 2 - -v = a { - x = 3 -} - -``` - -The result is a **dict**: - -```python -{ - 'v': { - 'x': 3 - 'y': 6 - } -} -``` - -Notes: - -- Report an error if inheriting more than one base schema. -- The type of the base schema attribute cannot be modified in the subschema. -- Report an error if inheriting a **mixin**. -- Report an error when a circular dependency occurs. - -Limitations: - -Since inheritance will derive some complex demands, we are cautious about these complex demands. There are still some restrictions on inheritance, and it's still under discussion. - -- KCL provides limited and deterministic polymorphism support, more complex and flexible polymorphism support, such as **self**, **super** keywords, are temporarily not included in the schema definition. -- Currently, KCL only supports the polymorphism of the inherited attributes of the schema, and does not support the polymorphism of the expressions in the check block. -- For the case of multiple levels of schema inheritance, the schema arguments can only be passed to the last level of sub-schema. - -### Mixin - -In addition to **composition** and **inheritance**, KCL supports declarative reuse of schema code through the **mixin** mechanism. To use a mixin, we only need to declare the **mixin** in the schema definition. 
- -The **mixin** syntax is the following: - -``` -//////////// mixin_stmt //////////// -mixin_stmt: "mixin" "[" [mixins | multiline_mixins] "]" "\n" -multiline_mixins: "\n" _INDENT mixins "\n" _DEDENT -mixins: operand_name ("," ("\n" mixins | operand_name))* -``` - -Here is a simple example: - -```python -schema Person: - mixin [FullNameMixin] - firstName: str = "default" - lastName: str - -schema FullNameMixin: - fullName: str = "{} {}".format(firstName, lastName) - -JohnDoe = Person { - firstName = "John" - lastName = "Doe" -} -``` - -The result is a **dict**: - -```python -{ - 'JohnDoe': { - 'firstName': 'John' - 'lastName': 'Doe' - 'fullName': 'John Doe' - } -} -``` - -Multiple mixins can be added to a single schema, and mixins context will be evaluated after the host schema context at runtime. In the inheritance scenario, the mixin context can be regarded as a part of the host schema context, and the overall evaluation of schema context order is not affected. - -Notes: - -- The name of **mixin** schema must end with 'Mixin', otherwise an error will be reported. -- The attributes referenced in the **mixin** must be defined in the **mixin** itself or host schema, otherwise an error will be reported. - -### Protocol - -In addition to schema, an additional type definition method `protocol` is provided in KCL, and its properties are as follows: - -- In a protocol, only attributes and their types can be defined, complex logic and check expressions cannot be written, and mixins cannot be used. -- A protocol can only inherit or refer to other protocols, but cannot inherit or refer to other schemas. - -We can use **protocol** to add an optional host type to the dynamically inserted **mixin**. - -The **mixin** can define its host type through the `for` keyword, and internally it will query the type corresponding to the attribute from the host type. - -```python -protocol DataProtocol: # A mixin host type - data: str - -mixin DataMixin for DataProtocol: # Using the `for` keyword to define a mixin host type - x: int = data # The type of `data` is `str`, which is from `data` of `DataProtocol` -``` - -In `DataMixin`, the `data` attribute is obtained according to the `DataProtocol` host type as `str` type, and then a type error will occur when the value is assigned to `x` of type `int`: - -```python -protocol DataProtocol: - data: str - -mixin DataMixin for DataProtocol: - x: int = data # Error: expect int, got str - x: str = data # Ok -``` - -Please note that the host type **protocol** can only be used for **mixin** definitions (the suffix name is `Mixin`), otherwise an error will be reported. - -```python -protocol DataProtocol: - data: str - -schema Data for DataProtocol: # Error: only schema mixin can inherit from protocol - x: str = data -``` - -### Schema Context Evaluation - -The schema definition is composed of attribute statements, configuration data, init statements, mixins, and checks. 
In a separate schema context, the evaluation top-down order is as follows:
-
-```
-|------------------------------------------|
-| attribute defaulting                     |
-|------------------------------------------|
-| configuration union                      |
-|------------------------------------------|
-| attribute templating                     |
-|------------------------------------------|
-| init statements in declaration order     |
-|------------------------------------------|
-| mixins in declaration order              |
-|------------------------------------------|
-| check expressions in any order           |
-|------------------------------------------|
-```
-
-In the case of schema inheritance, each schema context is evaluated from the base schema in the order of inheritance, and each context is evaluated only once.
-Suppose there are schemas a, b, and c, where c inherits b and b inherits a. Schema contexts will be evaluated in top-down order as:
-
-```
-|-----------------|
-| schema a        |
-|-----------------|
-| schema b        |
-|-----------------|
-| schema c        |
-|-----------------|
-```
-
-### Members
-
-Built-in functions and members of a schema:
-
-+ instances() - Return the list of existing instances of a schema.
-
-### Irrelevant Order Calculation
-
-The irrelevant order calculation in a schema describes the reference relationships between the schema's internal attributes. For example, when we declare an expression of the form `a = b + 1`, the value of `a` depends on the value of `b`. When the compiler calculates the value of `a` and that value depends on `b`, it will first calculate the value of `b` and then calculate the value of `a` according to the expression `a = b + 1`, which is slightly different from the evaluation order of a traditional procedural language.
-
-Since the calculation of values in the schema is based on dependencies, just as a directed acyclic graph is traversed in topological order, the declaration order of attributes in the schema does not matter, which is why the feature is called irrelevant order calculation.
-
-Please note that there can be no circular references between different schema attribute values.
-
-We can see this feature through the following examples.
- -```python -schema Person: - name?: str - age: int = _age - - _age = 10 - - if name == "Son": - _age = 18 - -schema Son(Person): - name: str = "Son" - -person = Person {} -son = Son {} -``` - -The output is - -```yaml -person: - name: null - age: 10 -son: - name: Son - age: 18 -``` - -Besides, we can achieve KCL polymorphism such as - -```python -schema Person: - name?: str - _age: int = _age - - _age = 10 - if name == "Son": - _age = 18 - elif name == "SonConf": - _age = 24 - -schema Son(Person): - name: str = "Son" - -person = Person() {} -son = Son() { - name = "SonConf" -} -``` - -The output is - -```yaml -person: - name: null - age: 10 -son: - name: SonConf - age: 24 -``` - -More examples: - -```python -schema Fib: - n1: int = n - 1 - n2: int = n1 - 1 - n: int - value: int = _value - - if n <= 2: - _value = 1 - else: - _value = (Fib {n = n1}).value + (Fib {n = n2}).value - -fib8 = (Fib {n = 8}).value -``` - -The output is - -```yaml -fib8: 21 -``` - -As in the above examples, we can see that in the schema, we only need to simply specify the dependency between attributes, and the compiler will automatically calculate the value based on the dependency, which can help us save a lot of boilerplate code and reduce configuration difficulty of writing. diff --git a/docs/reference/lang/lang/spec/statements.md b/docs/reference/lang/lang/spec/statements.md deleted file mode 100644 index 75b84cd6..00000000 --- a/docs/reference/lang/lang/spec/statements.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: "Statements" -linkTitle: "Statements" -type: "docs" -weight: 2 -description: Statements ---- -## Syntax - -In KCL, statements consist of small statements and compound statements. The syntax is the following: - -``` -preamble_statement: preamble_small_stmt | preamble_compound_stmt -preamble_small_stmt: (small_stmt | import_stmt) NEWLINE -preamble_compound_stmt: compound_stmt | schema_stmt -statement: small_stmt NEWLINE | compound_stmt -compound_stmt: if_stmt -small_stmt: assign_stmt | expr_stmt | assert_stmt -``` - -The preamble statement is used to define the module level statements, consist of `statement`, `import_stmt`, and `schema_stmt`. The statement is used to define the block level statements, which are used in the `if` statement and `schema` statement. - -### Small Statements - -A small statement is comprised of a single logical line. Multiple statements in one-line are not allowed. - -#### Assignment Statements - -Generally, assign_stmt is divided into assignment and augmented assignment. The syntax is the following: - -``` -assign_stmt: target_primary ("=" target_primary)* "=" test | target_primary augassign test -augassign: "+=" | "-=" | "*=" | "**=" | "/=" | "//=" | "%=" | "&=" | "|=" | "^=" | "<<=" | ">>=" | "or" | "and" -target_primary: identifier | target_primary DOT identifier -``` - -An assignment statement has the form `lhs = rhs`. It evaluates the expression on the right-hand side then assigns its value (or values) to the variable (or variables) on the left-hand side. - -The **target_primary** on the left-hand side is an `identifier` or an `identifier` followed by select dots. - -Note: When using **target_primary** will cause collisions, use **primary_expr** as an alternative. - -Examples: - -```python -k = 1 -a.b = "a.b" -``` - -To keep it simple, the compound target is not supported as **target_primary**. - -The right value of an assignment statement is a conditional expression, which is discussed separately. 
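For example (a small sketch with made-up variable names), the conditional expression on the right-hand side is evaluated first and its resulting value is then bound to the target:

```python
_env = "prod"
# The right value is a conditional expression; it evaluates to 3 here.
replicas = 3 if _env == "prod" else 1
```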
- -An augmented assignment, which has the form `lhs op= rhs` updates the variable `lhs` by applying a binary arithmetic operator op (one of +, -, *, /, //, %, &, |, ^, <<, >>) to the previous value of `lhs` and the value of `rhs`. - -The **target_primary** on the left-hand side is the same as assignment statement. Examples: - -```python -_x -= 1 -_filename += ".k" -``` - -There is no concept of in-place modification in KCL. The `augassign` statement will modify a copy of the **target_primary** and assign the copy to **target_primary**. - -In particular, in KCL, the `|=` symbol represents the **union** operation, which is defined as follows: - -- The behavior of the **union** operation needs to be consistent with the behavior of the **configuration definition**. - -See **expressions** spec for more details of union operator in **Arithmetic Operations**. - -#### Expression Statements - -An expression statement evaluates an expression and discards its result. - -Syntax: - -``` -expr_stmt: expression -``` - -An expression statement supported in KCL is function invocation expression, which is discussed in **expression** spec. - -```python -print(k) # print a variable -``` - -#### Import Statements - -Import statements are used to **search** and **load** a module, and define a name or names in the local namespace for the scope where the import statement occurs. - -Syntax: - -``` -import_stmt: "import" dot_name ("as" NAME)? -dot_name: [leading_dots] identifier (DOT identifier)* -leading_dots: "."+ -``` - -Examples: - -```python -import math # import a built-in module math -import pkg # import pkg -import pkg.foo # import pkg.foo -import pkg.subpkg # import a subpkg in a pkg -import .pkg2.subpkg3 # import a subpkg in a pkg inside of current pkg -import ...pkg2 # Go two levels up then import pkg2 -``` - -See **module** spec for more details of module spec. - -#### Assert Statements - -Assert statements are a convenient way to insert debugging assertions into KCL code. - -The syntax is the following: - -``` -assert_stmt: ASSERT test ("," test)? -``` - -The conditional expression in assert will be evaluated and get a boolean. Report an error if returning a `False`. - -Examples: - -```python -assert: x > 1 # report an error on x <= 1 -``` - -#### Conditional Statements - -KCL allows using conditional statements to control the instructions to -be executed. They are also called the control-flow statements. - -The only type of control-flow syntax is the well-known `if-elif-else` syntax. - -The syntax of the `if-elif-else` statement is the following. - -``` -if_stmt: "if" test ":" suite ("elif" test ":" suite)* (ELSE ":" suite)? -suite: small_stmt | NEWLINE _INDENT statement+ _DEDENT -``` - -An `if` or `elif` statement evaluates a given expression. When the expression -is evaluated to `True`, a list of statements following `:` are executed. - -The following is an example: - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - print("maybe a is negative") -else: - print("a >= 100") -``` - -`if-elif-else` statements can be nested. For example: - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - if a < 0: - print("a is negative") - print("No matter a is negative or positive, this message is printed") -else: - print("a >= 100") -``` - -#### Schema Statements - -Schema statements are used to define a type of configuration data. 
The syntax is the following: - -``` -schema_stmt: [decorators] "schema" ["relaxed"] identifier ["[" [arguments] "]"] ["(" operand_name ")"] ":" NEWLINE [schema_body] -schema_body: _INDENT (string NEWLINE)* [mixin_stmt] (schema_attribute_stmt | statement)* [check_block] _DEDENT -``` - -See **schema** spec for more details of schema spec. diff --git a/docs/reference/lang/lang/spec/variables.md b/docs/reference/lang/lang/spec/variables.md deleted file mode 100644 index 73e7ec3e..00000000 --- a/docs/reference/lang/lang/spec/variables.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "Variables" -linkTitle: "Variable" -type: "docs" -weight: 2 -description: Variable ---- -In KCL, variables can be defined using assign statements. For example, the following statement defines a variable `spam` to a string `"ham"`. - -```python -spam = "ham" -``` - -There are two types of variables, which are global variables and list comprehension local variables. - -- A global variable is defined not within any context. -- A comprehension local variable is defined inside a comprehension. - -A variable can be used after definition, until the end of the current scope. - -For a global variable, the scope is the module it is defined in. Note that a module can consists of multiple source files. - -For a list comprehension local variable, the scope is the list comprehension it is defined in. - -More information on modules, list comprehensions and scopes will be discussed in later chapters. - -## Immutability - -Global variables are immutable. In other words, once defined such a variable cannot be redefined (or, i.e., modified). - -The following code is illegal, and KCLVM will report an error during evaluation. - -```python -spam = "ham" -spam = "eggs" # Error: The immutability rule is violated! -``` - -- A variable starts with the `_` character is mutable. - -```python -_spam -cond = True -if cond: - _spam = "ham" -else: - _spam = "eggs" -``` - -## Variable Exporting - -As shown in the preview chapter, KCLVM is able to export evaluation results to the standard output according to a target data format. - -The rules are the followings: - -- Living global variables at the end of an evaluation will be dumped out. -- If the name of a variable starts with the `_` character, it will not be dumped out. - -## Uniqueness of Exported Variable Identifier - -Each exported variable identifier must be unique in its package, so that an exported variable could be located uniquely by package location path and variable identifier, such as 'a.b.c:var', in which 'a.b.c' locates a package. - -Two variable identifiers are different if: - -- they are spelled differently -- they are defined in different packages and are not compiled in a single execution - -Identifying an exported variable should be supported by the kcl compiler, which needs to provide corresponding identifying features through the command line and api form. diff --git a/docs/reference/lang/lang/tour.md b/docs/reference/lang/lang/tour.md deleted file mode 100644 index 3846789b..00000000 --- a/docs/reference/lang/lang/tour.md +++ /dev/null @@ -1,3324 +0,0 @@ ---- -title: "KCL Tour" -sidebar_position: 1 ---- - -This page shows how to use major KCL features, from variables and operators to schemas and libraries, with the assumption that you have already known how to program in another language. KCL is mainly inspired by Python, and knowing Python is very helpful for learning KCL. 
- -### Important Concepts - -As we learn about the KCL language, keep these facts and concepts in mind: - -- KCL is a configuration and policy language. It provides simplified and self-contained language design and library support for writing configurations and policies. It cannot be used for application development or other purposes supported by General Purpose Language (GPL). -- KCL absorbs classic **OOP** elements and provides simple, developer-friendly and reliable configuration writing practice with **type**, **reusing**, and **union**. -- KCL prefers **immutability** and recommend to add up incremental updates through the **union**. Immutability reduces side effects like unpredictable issues. -- KCL **schema** struct defines strict attributes, static types, and it also supports validation expressions. The **schema** struct is mainly composed of typed attributes, the schema context and the check block. -- KCL **config** is a **json**-like expression, by which we can reuse a full definition of the schema. KCL provides support for definition and configuration by separating schema and config. -- KCL **rule** is a structure for writing rule constraint expressions, which can be used for data verification and policy writing. -- KCL code files are managed as packages(directories) and modules(files). The schema types in the same package are visible to each other; the data cross packages need to be imported through the **import statement**. The package-level variables can be exported, but they are immutable for other packages. -- The KCL syntax definition mainly uses declarative expressions, and only provides a small number of necessary and imperative statements, such as import, if .. else, assert, assignment and schema. -- No main function, each `.k` file could be executed as a separate configuration. -- **Built-in functions** and **plugins** are supported to simplify coding. - -### Keywords - -The following table lists the words that the KCL language treats specially. - -``` - True False None Undefined import - and or in is not - as if else elif for - schema mixin protocol check assert - all any map filter lambda - rule -``` - -### Identifiers - -In KCL, an identifier is a name, may with selectors, that identifies a value. - -- Identifiers consist of letters, numbers, underscores or the prefix `$`. -- Identifiers cannot be repeated with keywords unless they have a `$` prefix. -- Identifiers must not contain any embedded spaces or symbols. -- Letters and underscores can be used anywhere in the identifier. -- Numbers cannot be placed in the first place of the identifier. -- The `$` character can only be placed in the first position of the identifier. - -Examples: - -```python -x -a -b1 -b_2 -_c -$if -``` - -To simplify the definition of the qualified identifier, such as `pkg.type`, we additionally define `qualified identifier`: - -Examples: - -```python -pkg.a -``` - -The package name in `qualified identifier` must be imported. - -#### Identifier Prefix - -Use the `$` character prefix to define keyword identifiers. - -```python -$if = 1 -$else = "s" -``` - -Please note: whether the non-keyword identifier is prefixed with `$` has the same effect. 
- -```python -_a = 1 -$_a = 2 # equal to `_a = 2` -``` - -### Variables - -Here’s an example of how to create a variable and initialize it: - -```python -name = "Foo" # Declare a variable named `name` and its value is a string literal "Foo" -``` - -It corresponds to the following YAML output: - -```yaml -name: Foo -``` - -In KCL, we can export variables as config data by defining package-level variables. To make it direct, clear, and maintainable. Exported variables are immutable so that once we declare it, we can't modify it. For example, assume we have a config file named `example.k`, the variable `name` can't be modified after the declaration, just like the standard imperative language. - -```python -name = "Foo" # exported - -... - -name = "Bar" # error: a exported declaration variable can only be set once. -``` - -As a complement, we can define a non-exported variable in module level which is mutable, which won't show up in YAML output: - -```python -_name = "Foo" # _ variables are not output to YAML and are mutable -_name = "Bar" -``` - -Please note that the variable name cannot be one of `True`, `False`, `None`, `Undefined` because of ambiguity. - -```python -False = 1 # Error -True = False # Error -None = Undefined # Error -Undefined = None # Error -``` - -### Built-in Types - -The KCL language has special support for the following types: - -- number -- string -- boolean -- list -- dict - -#### Number - -KCL number comes into two flavors: - -- **Int**: 64 bits signed integer values. Values can be from -9223372036854775808~9223372036854775807. -- **Float**: 64-bit floating-point numbers, as specified by the IEEE 754 standard. We do not recommend using the float type in the configuration, we can use a string instead and parse it during runtime processing. - -Both int and float support basic operators such as `+`, `-`, `/`, and `*`, while complex operations, such as `abs()`, `ceil()`, and `floor()`, are supported through the built-in math library. - -Integers are numbers without a decimal point. Here are some examples of defining integer literals: - -```python -a = 1 -b = -1 -c = 0x10 # hexadecimal literal -d = 0o10 # octal literal -e = 010 # octal literal -f = 0b10 # binary literal -g = int("10") # int constructor -``` - -If a number includes a decimal point, it is a float number. Here are some examples of defining float literals: - -```python -a = 1.10 -b = 1.0 -c = -35.59 -d = 32.3e+18 -f = -90. -h = 70.2E-12 -i = float("112") # float constructor -``` - -Built-in math libraries can be used with numbers: - -```python -import math - -assert abs(-40) == 40 -assert round(70.23456) == 70 -assert min(80, 100, 1000) == 80 -assert max(80, 100, 1000) == 1000 -assert sum([0,1,2]) == 3 -assert math.ceil(100.12) == 101.0 -assert math.floor(100.12) == 100.0 -assert math.pow(100, 2) == 10000.0 -``` - -In addition, please note that the KCL number is 64-bit by default. We can perform a stricter 32-bit range check by adding the `-r` parameter to the KCL command-line tool. - -``` -kcl main.k -r -d -``` - -Please note that the value range check is only enabled in `debug` mode - -##### Units - -In KCL, we can add a unit suffix to an integer denomination to indicate that it does not affect its true value as follows. 
- -- General integer or fixed-point number form: `P`, `T`, `G`, `M`, `K`, `k`, `m`, `u`, `n` -- Corresponding power of 2: `Pi`, `Ti`, `Gi`, `Mi`, `Ki` - -```python -# SI -n = 1n # 1e-09 -u = 1u # 1e-06 -m = 1m # 1e-03 -k = 1k # 1000 -K = 1K # 1000 -M = 1M # 1000000 -G = 1G # 1000000000 -T = 1T # 100000000000 -P = 1P # 1000000000000000 -# IEC -Ki = 1Ki # 1024 -Mi = 1Mi # 1024 ** 2 -Gi = 1Gi # 1024 ** 3 -Ti = 1Ti # 1024 ** 4 -Pi = 1Pi # 1024 ** 5 -``` - -Besides, And we can also use the unit constants defined in the `units` module as follows: - -```python -import units - -n = 1 * units.n # 1e-09 -u = 1 * units.u # 1e-06 -m = 1 * units.m # 1e-03 -k = 1 * units.k # 1000 -K = 1 * units.K # 1000 -M = 1 * units.M # 1000000 -G = 1 * units.G # 1000000000 -T = 1 * units.T # 1000000000000 -P = 1 * units.P # 1000000000000000 -# IEC -Ki = 1 * units.Ki # 1024 -Mi = 1 * units.Mi # 1024 ** 2 -Gi = 1 * units.Gi # 1024 ** 3 -Ti = 1 * units.Ti # 1024 ** 4 -Pi = 1 * units.Pi # 1024 ** 5 -``` - -We can also use the methods in the `units` module to convert between integers and unit strings. - -```python -import units -# SI -K = units.to_K(1000) # "1K" -M = units.to_M(1000000) # "1M" -G = units.to_G(1000000000) # "1G" -T = units.to_T(1000000000000) # "1T" -P = units.to_P(1000000000000000) # "1P" -# IEC -Ki = units.to_Ki(1024) # "1Ki" -Mi = units.to_Mi(1024 ** 2) # "1Mi" -Gi = units.to_Gi(1024 ** 3) # "1Gi" -Ti = units.to_Ti(1024 ** 4) # "1Ti" -Pi = units.to_Pi(1024 ** 5) # "1Pi" -``` - -```python -import units -# SI -K = units.to_K(int("1M")) # "1000K" -M = units.to_M(int("1G")) # "1000M" -G = units.to_G(int("1T")) # "1000G" -T = units.to_T(int("1P")) # "1000T" -P = units.to_P(int("10P")) # "10P" -# IEC -Ki = units.to_Ki(int("1Mi")) # "1024Ki" -Mi = units.to_Mi(int("1Gi")) # "1024Mi" -Gi = units.to_Gi(int("1Ti")) # "1024Gi" -Ti = units.to_Ti(int("1Pi")) # "1024Ti" -Pi = units.to_Pi(int("10Pi")) # "10Pi" -``` - -The unit value type is defined in the units module, and the unit value type does not allow any four calculations. - -```python -import units - -type NumberMultiplier = units.NumberMultiplier - -x0: NumberMultiplier = 1M # Ok -x1: NumberMultiplier = x0 # Ok -x2 = x0 + x1 # Error: unsupported operand type(s) for +: 'number_multiplier(1M)' and 'number_multiplier(1M)' -``` - -We can use the `int()`, `float()` function and `str()` function to convert the numeric unit type to the normal integer type and string type. - -```python -a: int = int(1Ki) # 1024 -b: float = float(1Ki) # 1024.0 -c: str = str(1Mi) # "1Mi" -``` - -#### String - -The string is an immutable sequence of Unicode characters. We can use either single or double quotes to create a string: - -```python -'allows embedded "double" quotes' # Single quotes -"allows embedded 'single' quotes" # Double quotes -'''Three single quotes''', """Three double quotes""" # Triple quoted -``` - -Triple quoted strings may span multiple lines. - -```python -"""This is a long triple quoted string -may span multiple lines. -""" -``` - -Please note that there is almost no difference in the use of KCL single-quoted and double-quoted strings. The only thing that can be simplified is that we don’t need to escape double quotes in single quoted strings, and we don’t need to escape single quotes in double quoted strings. - -```python -'This is my book named "foo"' # Don’t need to escape double quotes in single quoted strings. -"This is my book named 'foo'" # Don’t need to escape single quotes in double quoted strings. 
-``` - -We can concatenate strings using the `+` operator: - -```python -x = 'The + operator ' + 'works, as well.' -``` - -We can cast an int or float to a string using the built-in function `str`: - -```python -x = str(3.5) # "3.5" -``` - -A lot of handy built-in functions and members of a string could be used: - -```python -x = "length" -assert len(x) == 6 # True -assert x.capitalize() == "Length" -assert x.count("gt") == 1 -assert x.endswith("th") == True -assert x.find("gth") == 3 -assert "{} {}".format("hello", "world") == 'hello world' -assert x.index("gth") == 3 -assert x.isalnum() == True -assert x.isalpha() == True -assert "123456".isdigit() == True -assert x.islower() == True -assert " ".isspace() == True -assert "This Is Title Example".istitle() == True -assert x.isupper() == False -assert "|".join(["a", "b", "c"]) == "a|b|c" -assert "LENGTH".lower() == "length" -assert ' spacious '.lstrip() == 'spacious ' -assert x.replace("th", "ht") == "lenght" -assert "lengthlength".rfind("le") == 6 -assert "lengthlength".rindex("le") == 6 -assert "length length".rsplit() == ["length", "length"] -assert "length ".rstrip() == "length" -assert "length length".split() == ["length", "length"] -assert 'ab c\n\nde fg\rkl\r\n'.splitlines() == ['ab c', '', 'de fg', 'kl'] -assert "length".startswith('len') == True -assert "***length***".strip('*') == "length" -assert "length length".title() == "Length Length" -assert x.upper() == "LENGTH" -``` - -There are 2 different ways to format a string: to use the `"{}".format()` built-in function, or to specify the variable between the curly braces and use a `$` mark to tell KCL to extract its value. This is called **string interpolation** in KCL. In following example, both `a` and `b` will be assigned to string `"hello world"`. - -Besides, the variable to serialized can be extracted in special data format, such as YAML or JSON. In this case, a `#yaml` or `#json` can be included within the curly braces. - -Specifically, when the dollar sign `$` itself is needed in a **string interpolation**, it needs to be escaped and use `$$` instead. Or in another way, `+` can be used to concat the dollar sign with the **string interpolation** to avoid that escape. In following example, both `c` and `c2` will be assigned to string `$hello world$` - -```python -world = "world" -a = "hello {}".format(world) # "hello world" -b = "hello ${world}" # "hello world" -c = "$$hello ${world}$$" # "$hello world$" -c2 = "$" + "hello ${world}" + "$" # "$hello world$" - -myDict = { - "key1" = "value1" - "key2" = "value2" -} - -d = "here is myDict in json: ${myDict: #json}" -# d: 'here is myDict in json: {"key1": "value1", "key2": "value2"}' - -e = "here is myDict in yaml:\n${myDict: #yaml}" -# e: | -# here is myDict in yaml: -# key1: value1 -# key2: value2 -``` - -Besides, we can see some symbols in the example code output **YAML string** above such as `|`, `>`, `+`, `-`. - -- `|` denotes the **block literal style** that indicates how newlines inside the block should behave. -- `>` denotes the **block folded style** in the block scalar that the newlines will be replaced by spaces. -- `+` and `-` are the **block chomping indicators** that control what should happen with newlines at the end of the string. The default value **clip** puts a single newline at the end of the string. To remove all newlines, **strip** them by putting a `-` after the style indicators `|` or `>`. 
Both clip and strip ignore how many newlines are actually at the end of the block; to **keep** them all put a `+` after the style indicator. - -For example, a **strip block literal style** yaml string is - -```yaml -example: |- - Several lines of text, - with some "quotes" of various 'types', - and also a blank line: - - plus another line at the end. - - -``` - -The result is - -```plain -Several lines of text, -with some "quotes" of various 'types', -and also a blank line: - -plus another line at the end. -``` - -See [Yaml Multiline String](https://yaml-multiline.info/) and [YAML Specification v1.2](https://yaml.org/spec/1.2.1/) for more information. - -##### Raw String - -KCL raw string is created by prefixing a string literal with `'r'` or `'R'`. KCL raw string treats backslash (`\`) and string interpolation (`${}`) as a literal character. This is useful when we want to have a string that contains backslash, string interpolation and don’t want them to be treated as an escape character. - -- For backslash (`\`), the KCL code and output YAML are as follows: - -```python -s = "Hi\nHello" -raw_s = r"Hi\nHello" # This is a KCL raw string with the `r` prefix. -``` - -```yaml -s: |- - Hi - Hello -raw_s: Hi\nHello -``` - -- For string interpolation (`${}`), the KCL code and output YAML are as follows: - -```python -worldString = "world" -s = "Hello ${worldString}" -raw_s = r"Hello ${worldString}" # This is a KCL raw string with the `r` prefix. -``` - -```yaml -worldString: world -s: Hello world -raw_s: Hello ${worldString} -``` - -In addition, the most common scenario for raw strings is to be used with regular expressions: - -```python -import regex - -key = "key" -result = regex.match(key, r"[A-Za-z0-9_.-]*") # True -``` - -#### Boolean - -Boolean values are the two constant objects `False` and `True`. - -```python -a = True -b = False -``` - -#### List - -The list is a sequence, typically used to store collections of homogeneous items. Here’s a simple KCL list: - -```python -list = [1, 2, 3] -assert len(list) == 3 # True -assert list[0] == 1 # True -``` - -We can declare a list with list comprehension: - -```python -list = [ _x for _x in range(20) if _x % 2 == 0] -assert list == [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] # True -``` - -We can perform nested list comprehension: - -```python -matrix = [[1, 2], [3,4], [5,6], [7,8]] -transpose = [[row[_i] for row in matrix] for _i in range(2)] -assert transpose == [[1, 3, 5, 7], [2, 4, 6, 8]] # True -``` - -Besides, we can use two variables in the list comprehension, the first variable denotes the list index and the second variable denotes the list item. 
- -```python -data = [1000, 2000, 3000] -# Single variable loop -dataLoop1 = [i * 2 for i in data] # [2000, 4000, 6000] -dataLoop2 = [i for i in data if i == 2000] # [2000] -dataLoop3 = [i if i > 2 else i + 1 for i in data] # [1000, 2000, 3000] -# Double variable loop -dataLoop4 = [i + v for i, v in data] # [1000, 2001, 3002] -dataLoop5 = [v for i, v in data if v == 2000] # [2000] -# Use `_` to ignore loop variables -dataLoop6 = [v if v > 2000 else v + i for i, v in data] # [1000, 2001, 3000] -dataLoop7 = [i for i, _ in data] # [0, 1, 2] -dataLoop8 = [v for _, v in data if v == 2000] # [2000] -``` - -We can merge list like this: - -```python -_list0 = [1, 2, 3] -_list1 = [4, 5, 6] -joined_list = _list0 + _list1 # [1, 2, 3, 4, 5, 6] -``` - -We can also use the list unpacking operator `*` to merge multiple lists: - -```python -_list0 = [1, 2, 3] -_list1 = [4, 5, 6] -union_list = [*_list0, *_list1] # [1, 2, 3, 4, 5, 6] -``` - -We can use `if expressions` to dynamically add elements to the list element, elements that meet the conditions are added to the list, and elements that do not meet the conditions are ignored. - -```python -a = 1 # 1 -data = [ - 1 - if a == 1: 2 - if a > 0: 3 - if a < 0: 4 -] # [1, 2, 3] -``` - -```python -a = 1 # 1 -data1 = [ - 1 - if a == 1: - 2 - elif a == 2: - 3 - else: - 3 -] # [1, 2] -data2 = [ - 1 - if a == 1: 2 - elif a == 2: 2 - else: 3 -] # [1, 2] -``` - -Please note that in the above `if expressions`, nested use is not supported. - -We can union two lists like this: - -```python -_list0 = [1, 2, 3] -_list1 = [4, 5, 6] -union_list = _list0 | _list1 # [4, 5, 6] -``` - -We can use the expression `for k in list_var` to traverse a list. - -```python -data = [1, 2, 3] -dataAnother = [val * 2 for val in data] # [2, 4, 6] -``` - -#### Dict - -Dict is a mapping object that maps hashable values to arbitrary objects. Dict is ordered. The order of the keys follows the order of their declaration. - -Here are a couple of simple KCL dict, created using dict literals: - -```python -a = {"one" = 1, "two" = 2, "three" = 3} -b = {'one' = 1, 'two' = 2, 'three' = 3} -assert a == b # True -assert len(a) == 3 # True -``` - -We can ignore the comma `,` at the end of the line for writing dict key-value pairs in multiple lines: - -```python -data = { - "key1" = "value1" # Ignore the comma ',' at the end of line - "key2" = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -We can ignore the key quotation marks when we writing simple literals on the key. - -```python -data = { - key1 = "value1" # Ignore key quotation '"' - key2 = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -In addition, the **config selector expressions** can be used to init a dict instance with nested keys. - -```python -person = { - base.count = 2 - base.value = "value" - labels.key = "value" -} # {"base": {"count": 2, "value": "value"}, "labels": {"key": "value"}} -``` - -The output YAML is - -```yaml -person: - base: - count: 2 - value: value - labels: - key: value -``` - -We can declare a dict with dict comprehension: - -```python -x = {str(i): 2 * i for i in range(3)} -assert x == {"0" = 0, "1" = 2, "2" = 4} -``` - -Besides, we can use two variables in the dict comprehension, the first variable denotes the dict key and the second variable denotes the dict value of the key. 
- -```python -data = {key1 = "value1", key2 = "value2"} -# Single variable loop -dataKeys1 = {k: k for k in data} # {"key1": "key1", "key2": "key2"} -dataValues1 = {k: data[k] for k in data} # {"key1": "value1", "key2": "value2"} -# Double variable loop -dataKeys2 = {k: k for k, v in data} # {"key1": "key1", "key2": "key2"} -dataValues2 = {v: v for k, v in data} # {"value1": "value1", "value2": "value2"} -dataFilter = {k: v for k, v in data if k == "key1" and v == "value1"} # {"key1": "value1"} -# Use `_` to ignore loop variables -dataKeys3 = {k: k for k, _ in data} # {"key1": "key1", "key2": "key2"} -dataValues3 = {v: v for _, v in data} # {"value1": "value1", "value2": "value2"} -``` - -We can **merge** dict using the dict unpacking operator `**` like this: - -```python -_part1 = { - a = "b" -} - -_part2 = { - c = "d" -} - -a_dict = {**_part1, **_part2} # {"a: "b", "c": "d"} -``` - -In addition, the same effect can be achieved by using the union operator `|`: - -```python -_part1 = { - a = "b" -} - -_part2 = { - c = "d" -} - -a_dict = _part1 | _part2 # {"a: "b", "c": "d"} -``` - -We can use `if expressions` to dynamically add elements to the dict element, elements that meet the conditions are added to the dict, and elements that do not meet the conditions are ignored. - -```python -a = 1 # 1 -data = { - key1 = "value1" - if a == 1: key2 = "value2" - if a > 0: key3 = "value3" - if a < 0: key4 = "value4" -} # {"key1": "value1", "key2": "value2", "key3": "value3"} -``` - -```python -a = 1 # 1 -data1 = { - key1 = "value1" - if a == 1: - key2 = "value2" - elif a > 0: - key3 = "value3" - else: - key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -data2 = { - key1 = "value1" - if a == 1: key2 = "value2" - elif a > 0: key3 = "value3" - else: key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -``` - -We can use the expression `for k in dict_var` to traverse a dict, and we can use the `in` operator to determine whether a dict contains a certain key - -```python -data = {key1 = "value1", key2 = "value2"} -dataAnother = {k: data[k] + "suffix" for k in data} # {"key1": "value1suffix", "key2": "value2suffix"} -containsKey1 = "key1" in data # True -containsKey2 = "key" in data # False -``` - -#### None - -In KCL, `None` can indicate that the value of the object is empty, which is similar to `nil` in Go or `null` in Java, and corresponds to `null` in YAML. - -```python -a = None -b = [1, 2, None] -c = {key1 = value1, key2 = None} -``` - -The output is as follows: - -```yaml -a: null -b: -- 1 -- 2 -- null -c: - key1: value1 - key2: null -``` - -Please note that `None` cannot participate in the four arithmetic operations, but it can participate logical operators and comparison operators to perform calculations. - -```python -a = 1 + None # error -b = int(None) # error -c = not None # True -d = None == None # True -e = None or 1 # 1 -f = str(None) # None -``` - -#### Undefined - -`Undefined` is similar to `None`, but its semantics is that a variable is not assigned any value and will not be output to YAML - -```python -a = Undefined -b = [1, 2, Undefined] -c = {key1 = "value1", key2 = Undefined} -``` - -The output is as follows: - -```yaml -b: -- 1 -- 2 -c: - key1: value1 -``` - -Please note that `Undefined` cannot participate in the four arithmetic operations, but it can participate logical operators and comparison operators to perform calculations. 
- -```python -a = 1 + Undefined # error -b = int(Undefined) # error -c = not Undefined # True -d = Undefined == Undefined # True -e = Undefined or 1 # 1 -f = str(Undefined) # Undefined -``` - -### Operators - -The following character sequences represent operators: - -``` - + - * ** / // % - << >> & | ^ < > - ~ <= >= == != @ \ -``` - -#### Arithmetic Operators - -KCL supports the common arithmetic operators: - -```python -assert 2 + 3 == 5 -assert 2 - 3 == -1 -assert 2 * 3 == 6 -assert 5 / 2 == 2.5 -assert 5 // 2 == 2 -assert 5 % 2 == 1 -``` - -#### Equality and Relational Operators - -KCL supports the meanings of equality and relational operators: - -```python -assert 2 == 2 -assert 2 != 3 -assert 3 > 2 -assert 2 < 3 -assert 3 >= 3 -assert 2 <= 3 -``` - -#### Logical Operators - -We can invert or combine boolean expressions using the logical operators e.g., `and` and `or`: - -```python -if not done and (col == 0 or col == 3): - # ...Do something... - -``` - -#### Bitwise and Shift Operators - -Here are examples of using bitwise and shift operators: - -```python -value = 0x22 -bitmask = 0x0f - -assert (value & bitmask) == 0x02 -assert (value & ~bitmask) == 0x20 -assert (value | bitmask) == 0x2f -assert (value ^ bitmask) == 0x2d -assert (value << 4) == 0x220 -assert (value >> 4) == 0x02 -``` - -The `|` operator likewise computes bitwise, unions basic types and unions collection and schema data, such as **list**, **dict** and **schema**. - -Computing bitwise examples: - -```python -0x12345678 | 0xFF # 0x123456FF -``` - -Unioning basic types examples: - -```python -schema x: - a: int | str # attribute a could be a int or string -``` - -#### Assignment Operators - -The following tokens serve as delimiters in the grammar: - -``` - ( ) [ ] { } - , : . ; = -> - += -= *= /= //= %= - &= ^= >>= <<= **= -``` - -The following examples use assignment and argument assignment operators: - -```python -_a = 2 -_a *= 3 -_a += 1 -assert _a == 7 -``` - -#### Identity Operators - -The following keywords serve as identity operators in the grammar: - -```python -is, is not -``` - -The identity operators check whether the right hand side and the left hand side are the very same object. They are usually used to check if some variable is `None/Undefined/True/False`. Here are some examples: - -```python -empty_String = "" -empty_String is not None # True -``` - -#### Membership Operators - -The following keywords serve as membership operators in the grammar: - -```python -in, not in -``` - -- The `in` operator reports whether its first operand is a member of its second operand, which must be a list, dict, schema, or string. -- The `not in` operator is its negation. Both return a Boolean. - -The meaning of membership varies by the type of the second operand: the members of a list are its elements; the members of a dict are its keys; the members of a string are all its substrings. - -```python -1 in [1, 2, 3] # True - -d = {one = 1, two = 2} -"one" in d # True -"three" in d # False -1 in d # False -[] in d # False - -"nasty" in "dynasty" # True -"a" in "banana" # True -"f" not in "way" # True - -d = Data {one = 1, two = 2} # Data is a schema with attributes one and two -"one" in d # True -"three" in d # False -``` - -#### Comprehension - -A comprehension constructs a new list or dictionary value by looping over one or more iterables and evaluating a body expression that produces successive elements of the result. 
- -We can declare list and dict by comprehension as: - -```python -listVar = [_x for _x in range(20) if _x % 2 == 0] # list comprehension -dictVar = {str(_i): 2 * _i for _i in range(3)} # dict comprehension -``` - -#### Other Operators - -We can: - -- Represents a function call with **()**, like `"{} {}".format("hello", world)` -- Refers to the value at the specified index in the list with **[]** -- Define a type hint with **:** -- Refers to a member field with **.** -- Use the line continuation symbol `\` to write long expressions - -```python -longString = "Too long expression " + \ - "Too long expression " + \ - "Too long expression " -``` - -### Expressions - -#### Conditional Expressions - -A conditional expression has the form `a if cond else b`. It first evaluates the condition `cond`. If it's true, it evaluates `a` and yields its value; otherwise, it yields the value of `b`. - -Examples: - -```python -x = True if enabled else False # If enabled is True, x is True, otherwise x is False -``` - -#### Index Expressions - -An index expression `a[i]` yields the `i` th element of an indexable type such as a string or list. The index `i` must be an `int` value in the range `-n` ≤ `i` < `n`, where `n` is `len(a)`; any other index results in an error. - -A valid negative index `i` behaves like the non-negative index `n+i`, allowing for convenient indexing relative to the end of the sequence. - -```python -val = "abc" -list = ["zero", "one", "two"] -str_0 = val[0] # "a" -str_1 = val[1] # "b" -str_n1 = val[-1] # "c" - -list_0 = list[0] # "zero" -list_1 = list[1] # "one" -list_n1 = list[-1] # "two" -``` - -An index expression `d[key]` may also be applied to a dictionary `d`, to obtain the value associated with the specified key. It returns `Undefined` if the dictionary contains no such key. - -An index expression appearing on the left side of an assignment causes the specified list or dictionary element to be updated: - -```python -d = {key1 = "value1", key2 = "value2"} -key1value = d["key1"] # value1 -key2value = d["key2"] # value2 -``` - -It is a dynamic error to attempt to update an element of an immutable type, such as a list or string, or a frozen value of a mutable type. - -#### Slice Expressions - -A slice expression `a[start:stop:step]` yields a new value containing a sub-sequence of `a`, which must be a string, or list. - -Each of the `start`, `stop`, and `step` operands is optional; if present, each must be an integer. The `step` value defaults to 1. If the step is not specified, the colon preceding it may be omitted too. It is an error to specify a step of zero. - -Conceptually, these operands specify a sequence of values `i` starting at start and successively adding 'step' until `i` reaches or passes `stop`. The result consists of the concatenation of values of `a[i]` for which `i` is valid. - -The effective start and stop indices are computed from the three operands as follows. Let `n` be the length of the sequence. - -```python -val = "abc" -len = len(val) -a = val[1:len] # "bc" (remove first element) -b = val[0:-1] # "ab" (remove last element) -c = val[1:-1] # "b" (remove first and last element) -``` - -```python -"abc"[1:] # "bc" (remove first element) -"abc"[:-1] # "ab" (remove last element) -"abc"[1:-1] # "b" (remove first and last element) -"banana"[1::2] # "aaa" (select alternate elements starting at index 1) -"banana"[4::-2] # "nnb" (select alternate elements in reverse, starting at index 4) -``` - -It's not allowed to define a slice expression as a left value in KCL. 
Cause list and string are immutable, re-slicing can directly operate to operand to ensure better performance. - -#### Function Invocations - -KCL allows calling built-in functions and functions from built-in and system modules. - -To call a function, the basic way is shown as the following code excerpt: - -```python -import math - -a = math.pow(2, 3) # 2 powers 3 is 8. -b = len([1, 2, 3]) # the length of [1, 2, 3] is 3 -``` - -As you can see, arguments are separated with `,`, and KCL also supports positional arguments and key-value arguments. - -```python -print("hello world", end="") -``` - -Note that: - -- Some functions have parameters with default values. -- Some functions accept variadic arguments. - -When an argument is not supplied for a parameter without a default value, an error will be reported. - -#### Selector Expressions - -A selector expression selects the attribute or method of the value. KCL provides a wealth of ways to identify or filter attributes. - -`x.y` - -- dict: it denotes the value of the key `y` in the dict `x` -- schema: it denotes the attribute value of a schema `x` identified by `y` -- package: it denotes the identifier of a package `x` identified by `y` - -Examples: - -```python -schema Person: - name: str - age: int - -person = Person { - name = "Alice" - age = 18 -} -name = person.name # "Alice" -age = person.age # 18 - -myDict = { - key = "value" -} -result = myDict.key # "value" -``` - -`x?.y` - -`x` can be a schema instance or a dict. This is extremely helpful when the value of `x` might be `None` or when the key `y` might not exist in `x`. - -```python -# Example of dict: -data = {key = "value"} -a = data?.key # "value" -b = data?.name # Undefined - -# example of schema instance: -schema Company: - name: str - address: str - -schema Person: - name: str - job?: Company - -alice = Person { - name = "alice" -} - -if alice?.job?.name == "Group": - print("work in Group") -``` - -#### Quantifier Expressions - -Quantifier expressions act on collection: list or dict, generally used to obtain a certain result after processing the collection, mainly in the following four forms: - -- **all** - - Used to detect that all elements in the collection satisfy the given logical expression, and return a boolean value as the result. - - Only when all elements in the collection satisfy the expression true, the `all` expression is true, otherwise it is false. - - If the original collection is empty, return true. - - Supports short-circuiting of logical expressions during expression execution. -- **any** - - Used to detect that at least one element in the collection satisfies the given logical expression, and returns a boolean value as the result. - - When at least one element in the collection satisfies the expression true, the `any` expression is true, otherwise it is false. - - If the original collection is empty, return false. - - Supports short-circuiting of logical expressions during expression execution. -- **map** - - Generate a new **list** by mapping the elements in the original collection. - - The length of the new list is exactly the same as the original collection. -- **filter** - - By logically judging and filtering the elements in the original collection, and returning the filtered sub-collection. - - Only when the element judges the expression to be true, it is added to the sub-collection. - - The type (list, dict and schema) of the new collection is exactly the same as the original collection, and the length range is `[0, len(original-collection)]`. 
- -**all** and **any** expression sample codes: - -```python -schema Config: - volumes: [{str:}] - services: [{str:}] - - check: - all service in services { - service.clusterIP == "NONE" if service.type == "ClusterIP" - }, "invalid cluster ip" - - any volume in volumes { - volume.mountPath in ["/home/admin", "/home/myapp"] - } -``` - -**map** and **filter** expression sample codes: - -```python -a = map e in [{name = "1", value = 1}, {name = "2", value = 2}] { - {name = e.name, value = int(e.value) ** 2} -} # [{"name": "1", value: 1}, {"name": "2", "value": 4}] - -b = map k, v in {a = "foo", b = "bar"} { v } # ["foo", "bar"] - -c = filter e in [{name = "1", value = 1}, {name = "2", value = 2}] { - int(e.value) > 1 -} # [{"name": "2", "value": 2}] - -d = filter _, v in {a = "foo", b = "bar"} { - v == "foo" -} # {"a": "foo"} -``` - -Please pay attention to distinguish the difference between any expression and any type. When `any` is used in type annotations, it means that the value of the variable is arbitrary, while the any expression means that one of the elements in a set satisfies the condition. - -### Control Flow Statements - -#### If and Else - -KCL supports `if` statements with optional `elif` and `else` statements, as the next sample shows. - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - print("maybe a is negative") -else: - print("a >= 100") -``` - -The `elif` example: - -```python -_result = 0 -if condition == "one": - _result = 1 -elif condition == "two": - _result = 2 -elif condition == "three": - _result = 3 -else: - _result = 4 -``` - -`if-elif-else` statements can be nested. For example: - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - if a < 0: - print("a is negative") - print("No matter a is negative or positive, this message is printed") -else: - print("a >= 100") -``` - -In addition, for simple `if` statements as follows: - -```python -if success: - _result = "success" -else: - _result = "failed" -``` - -We can have it in one line using the ` if else ` pattern: - -```python -_result = "success" if success else "failed" -``` - -An `if` or `elif` statement evaluates a given expression. When the expression is evaluated to `True`, a list of statements following `:` are executed and when the expression is evaluated to `False` and statements will not be executed. - -Please note that the false name constant `False`, `None`, the zero number `0`, the empty list `[]`, the empty dict `{}` and the empty string `""` are all seen as `False` expressions. - -```python -_emptyStr = "" -_emptyList = [] -_emptyDict = {} -isEmptyStr = False if _emptyStr else True -isEmptyList = False if _emptyList else True -isEmptyDict = False if _emptyDict else True -``` - -The output is - -```yaml -isEmptyStr: true -isEmptyList: true -isEmptyDict: true -``` - -### Assert - -When errors happen, developers should be able to detect the error and abort execution. Thus, KCL introduce the `assert` syntax. The following is an example: - -```python -a = 1 -b = 3 -# a != b evaluates to True, therefore no error should happen. -assert a != b -# a == b is False, in the reported error message, the message "SOS" should be printed. -assert a == b, "SOS" -``` - -In addition, we can declare a condition for the assert statement and make an assertion when the condition is met. 
The usual way of writing is - -```python -a = None -if a: - assert a > 2: -``` - -In KCL, it can also be simplified to the following form using the **if** expression to compose more complex conditional assert logic: - -```python -a = None -assert a > 2 if a -``` - -### Function - -KCL supports using the lambda keyword to define a function. - -```python -func = lambda x: int, y: int -> int { - x + y -} -a = func(1, 1) # 2 -``` - -- The value of the last expression is used as the return value of the function, and the empty function body returns `None`. -- The return value type annotation can be omitted, and the return value type is the type of the last expression value. -- There is no order-independent feature in the function body, all expressions are executed in order. - -```python -_func = lambda x: int, y: int -> int { - x + y -} # Define a function using the lambda expression -_func = lambda x: int, y: int -> int { - x - y -} # Ok -_func = lambda x: int, y: int -> str { - str(x + y) -} # Error (int, int) -> str can't be assigned to (int, int) -> int -``` - -The function type variables cannot participate in any calculations and can only be used in assignment statements and call statements. - -```python -func = lambda x: int, y: int -> int { - x + y -} -x = func + 1 # Error: unsupported operand type(s) for +: 'function' and 'int(1)' -``` - -The lambda function supports the capture of external variables, which can be passed as parameters of other functions. - -```python -a = 1 -func = lambda x: int { - x + a -} -funcOther = lambda f, para: int { - f(para) -} -r0 = funcOther(func, 1) # 2 -r1 = funcOther(lambda x: int { - x + a -}, 1) # 2 -``` - -The output is - -```yaml -a: 1 -r: 2 -``` - -Further, we can define an anonymous function through lambda expression and call it. - -```python -result = (lambda x, y { - z = 2 * x - z + y -})(1, 1) # 3 -``` - -We can also use anonymous functions in the for loop. - -```python -result = [(lambda x, y { - x + y -})(x, y) for x in [1, 2] for y in [1, 2]] # [2, 3, 3, 4] -``` - -Note that the functions defined in the KCL are pure functions: - -- The return result of a function depends only on its arguments. -- There are no side effects in the function execution process. - -Therefore, KCL functions cannot modify external variables, but can only reference external variables. For example, the following code will cause an error: - -```python -globalVar = 1 -func = lambda { - x = globalVar # Ok - globalVar = 1 # Error -} -``` - -### Type System - -#### Type Annotation - -Type annotations can be used on top level variables, schema attributes and arguments. - -- An attribute can be of a basic type, such as a string (`str`), a floating-point number (`float`), a fixed-point number (`int`) or a boolean (`bool`). -- An attribute can be a literal type, such as a string literal (`"TCP"` and `"UDP"`), a number literal (`"1"` and `"1.2"`), a boolean literal (`True` and `False`) -- An attribute can also be a list or an ordinary dict: - - A list with unspecified type of elements is `[]`. - - A list with elements of type `t` is `[t]`. Here `t` is another type. - - A dict with keys of type `kt` and values of type `vt` is `{kt:vt}`. - - `kt`, `vt` or both of them can be missing, like a list with unspecified type of elements. -- An attribute can be a **union type** defined by `|`, such as `a | b`, which means the type of the member could be a or b. 
- - A union type can include any types of `int`, `str`, `float`, `bool`, `list`, `dict`, literal and schema type, and supports type nesting e.g. `{str:str|int}`, `[[int|str]|str|float]` and `2 | 4 | 6`, etc. -- An attribute can also be of a type generated from other schema. In such a case, the name of the other schema (including the package path prefix) is used as the type name. -- An attribute can annotated an any type e.g., `any`. - -Examples: - -- Basic type - -```python -"""Top level variable type annotation""" -a: int = 1 # Declare a variable `a` that has the type `int` and the value `1` -b: str = "s" # Declare a variable `b` that has the type `str` and the value `"s"` -c: float = 1.0 # Declare a variable `c` that has the type `float` and the value `1.0` -d: bool = True # Declare a variable `d` that has the type `bool` and the value `True` -``` - -- List/Dict/Schema Type - -```python -schema Person: - name: str = "Alice" - age: int = 10 - -a: [int] = [1, 2, 3] # Declare a variable `a` that has the list type `[int]` and the value `[1, 2, 3]` -b: {str:str} = {k1 = "v1", k2 = "v2"} # Declare a variable `b` that has the dict type `{str:str}` and the value `{k1 = "v1", k2 = "v2"}` -c: Person = Person {} # Declare a variable `c` that has the schema type `Person` and the value `Person {}` -``` - -- Union Type - -```python -# Basic union types -schema x[argc: int]: # Schema argument type annotation - p: int | str # Schema attribute type annotation -``` - -```python -# Literal union types -schema LiteralType: - # String literal union types, x_01 can be one of "TCP" and "UDP" - x_01: "TCP" | "UDP" - # Number literal union types, x_02 can be one of 2, 4, and 6 - x_02: 2 | 4 | 6 - # Unit union types, x_03 can be one of 1Gi, 2Gi and 4Gi - x_03: 1Gi | 2Gi | 4Gi - -x = LiteralType { - x_01 = "TCP" - x_02 = 2 - x_03 = 1Gi -} -``` - -The compiler throws an error when the value of a property does not conform to the union type definition: - -```python -# Literal union types -schema LiteralType: - # String literal union types, x_01 can be one of "TCP" and "UDP" - x_01: "TCP" | "UDP" - -x = LiteralType { - x_01 = "HTTP" # Error: the type got is inconsistent with the type expected, expect str(TCP)|str(UDP), got str(HTTP) -} -``` - -- Any Type - -```python -# Any type -schema Config: - literalConf: any = 1 - dictConf: {str:any} = {key = "value"} - listConf: [any] = [1, "2", True] - -config = Config {} -``` - -In KCL, changing the type of a variable is not allowed. If the type is not satisfied when reassigning the value, the type error will be raised. - -```python -_a = 1 # The type of `_a` is `int` -_a = "s" # Error: expect int, got str(s) -``` - -The type of a variable can be assigned to its upper bound type, but cannot be assigned to its specialized type. - -`None` and `Undefined` can be assigned to any type: - -- All types can be assigned to `any` type, `None` and `Undefined` can be assigned to `any` type. - -```python -a: int = None -b: str = Undefined -c: any = 1 -d: any = "s" -e: any = None -``` - -- The `int` type can be assigned to the `float` type, and the `float` type cannot be assigned to the `int` type. - -```python -a: float = 1 -b: int = 1.0 # Error: expect int, got float(1.0) -``` - -- The `int` type can be assigned to the `int|str` type, and the `int|str` type cannot be assigned to the `int` type. 
- -```python -a: int | str = 1 -b: int = 1 if a else "s" # Error: expect int, got int(1)|str(s) -``` - -Note that although the any type is provided in the KCl, it is still a static type, and the types of all variables are immutable during compilation. - -#### Type Inference - -If a variable or constant declaration in the top level or in the schema is not annotated explicitly with a type, the declaration's type is inferred from the initial value. - -- Integer literals are inferred to type `int`. - -```python -a = 1 # The variable `a` has the type `int` -``` - -- Float literals are inferred to type `float`. - -```python -a = 1.0 # The variable `a` has the type `float` -``` - -- String literals are inferred to type `str`. - -```python -a = "s" # The variable `a` has the type `str` -``` - -- Boolean literals are inferred to type `bool` - -```python -a = True # The variable `a` has the type `bool` -b = False # The variable `b` has the type `bool` -``` - -- `None` and `Undefined` are inferred to type `any` - -```python -a = None # The variable `a` has the type `any` -b = Undefined # The variable `b` has the type `any` -``` - -- List literals are inferred based on the elements of the literal, and to be variable-size. - -```python -a = [1, 2, 3] # The variable `a` has the type `[int]` -b = [1, 2, True] # The variable `b` has the list union type `[int|bool]` -c = ["s", 1] # The variable `c` has the list union type `[int|str]` -``` - -Please note that a empty list will be inferred to `[any]` - -```python -a = [] # The variable `a` has the type `[any]` -``` - -- Dict literals are inferred based on the keys and values of the literal, and to be variable-size. - -```python -a = {key = "value"} # The variable `a` has the type `{str:str}` -b = {key = 1} # The variable `b` has the type `{str:int}` -c = {key1 = 1, key2 = "s"} # The variable `c` has the type `{str:int|str}` -``` - -Please note that a empty dict will be inferred to `{any:any}` - -```python -a = {} # The variable `a` has the type `{any:any}` -``` - -- The type of the if conditional expression carrying the runtime value will be statically inferred as a union type of all possible results. - -```python -a: bool = True # The variable `a` has the type `bool` -b = 1 if a else "s" # The variable `b` has the type `int|str` -``` - -When a variable is deduced to a certain type, its type cannot be changed - -```python -_a = 1 -_a = "s" # Error: expect int, got str(1) -``` - -#### Type Alias - -We can use the `type` keyword to declare a type alias for all types in KCL to simplify the writing and use of complex types. - -```python -type Int = int -type String = str -type StringOrInt = String | Int -type IntList = [int] -type StringAnyDict = {str:} -``` - -We can import a type through import and define an alias for it. - -```py -import pkg - -type Data = pkg.Data -``` - -In addition, we can use type aliases and union types to achieve similar enumeration functions. - -```python -# A type alias of string literal union types -type Color = "Red" | "Yellow" | "Blue" - -schema Config: - color: Color = "Red" # The type of color is `"Red" | "Yellow" | "Blue"`, and it has an alias `Color`, whose default value is `"Red"` - -config = Config { - color = "Blue" -} -``` - -The output YAML is - -```yaml -config: - color: Blue -``` - -Please note that the type alias name cannot be one of `any`, `int`, `float`, `bool` and `str` because of ambiguity. 
- -```python -type any = int | str # Error -type int = str # Error -type float = int # Error -type bool = True # Error -type str = "A" | "B" | "C" # Error -``` - -#### Type Guards - -KCL supports the `typeof` function which can give very basic information about the type of values we have at runtime. In KCL, checking against the value returned by `typeof` is a type guard. KCL expects this to return a certain set of strings: - -Example: - -```python -import sub as pkg - -_a = 1 - -t1 = typeof(_a) -t2 = typeof("abc") - -schema Person: - name?: any - -_x1 = Person {} -t3 = typeof(_x1) - -_x2 = pkg.Person {} -t4 = typeof(_x2) -t5 = typeof(_x2, full_name=True) - -t6 = typeof(_x1, full_name=True) - -# Output -# t1: int -# t2: str -# t3: Person -# t4: Person -# t5: sub.Person -# t6: __main__.Person -``` - -In addition, we can use the `as` keyword in conjunction with type guards to complete defensive type conversion programming. - -Only types with partial order can be downcast converted, the use of the as keyword is as follows: - -- Basic types of partial order relations, e.g., `float -> int` -- Partial order relation of union type, e.g., `int | str -> str` and `[int | str] -> [str]` -- Contains the partial order relation of the upper bound of the type, e.g., `any -> int` -- Partial order relationship of structure type, e.g., `base-schema -> sub-schema` - -```python -schema Data1: - id?: int - -schema Data2: - name?: str - -data: Data1 | Data2 = Data1 {} - -if typeof(a) == "Data1": - data1 = data as Data1 # The type of `data1` is `Data1` -elif typeof(a) == "Data2": - data2 = data as Data2 # The type of `data2` is `Data2` -``` - -When a runtime error occurs in the `as` type conversion, a runtime error is thrown. - -```python -a: any = "s" -b: int = a as int # Error: The `str` type cannot be converted to the `int` type -``` - -If we don’t want to throw a runtime error, we can use the type guard for defensive coding with `if` expressions. - -```python -a: any = "s" -b = a as int if typeof(a) == "int" else None # The type of b is `int` -``` - -Note that the `as` conversion of literal type and union type is not supported, because they are not a certain runtime object, only int, float and other objects at runtime, there is no int literal, float literal object, and no union object. - -### Schema - -#### Overview - -A schema is a language element to define a complex configuration. -We can define typed attributes, initialization assignment, and verification rules. In addition, KCL supports schema single inheritance, mixin and protocol to realize the reuse of complex configuration. - -#### Basic - -##### Attribute - -The followings are some basic examples: - -```python -# A person has a first name, a last name and an age. -schema Person: - firstName: str - lastName: str - # The default value of age is 0 - age: int = 0 -``` - -In KCL, we can use type annotations to define some attributes in the schema, each attribute can be set with an optional default value (such as the `age` attribute in the above code, its default value is `0`), attributes that are not set default values have an initial value of `Undefined`, which are not output in YAML. - -Note, the immutability of attributes in the schema follows the same rules as the immutability of global variables, only mutable attributes in the schema can be modified in the schema. 

```python
schema Person:
    age: int = 1 # Immutable attribute
    _name: str = "Alice" # Mutable attribute

    age = 10 # Error
    _name = "Bob" # Ok
```

###### Optional Attribute

Every attribute of a schema instance **must** be assigned a value that is neither `None` nor `Undefined`, unless the attribute is marked with a question mark, which makes it optional.

Examples:

```python
schema Employee:
    bankCard: int # bankCard is a required attribute, and it can NOT be None or Undefined
    nationality?: str # nationality is an optional attribute, and it can be None or Undefined

employee = Employee {
    bankCard = None # Error, attribute 'bankCard' of Employee is required and can't be None or Undefined
    nationality = None # Ok
}
```

##### Irrelevant Order Calculation

Irrelevant order calculation describes how attribute values inside a schema are evaluated according to the references between them. For example, when we declare an expression of the form `a = b + 1`, the value of `a` depends on the value of `b`. When the compiler evaluates `a`, it first evaluates `b` and then computes `a` from the expression `a = b + 1`. This differs from the strictly sequential evaluation of traditional procedural languages.

Since values in a schema are calculated according to their dependencies, much like traversing a directed acyclic graph in topological order, the order in which attributes are declared does not matter. This is why the feature is called irrelevant order calculation.

Please note that there can be no circular references between different schema attribute values.

We can see this feature in the following example.

```python
schema Fib:
    n1: int = n - 1 # Refers to the attribute `n` declared after `n1`
    n2: int = n1 - 1
    n: int
    value: int = 1 if n <= 2 else Fib {n = n1}.value + Fib {n = n2}.value

fib8 = Fib {n = 8}.value
```

The output is

```yaml
fib8: 21
```

As the example shows, we only need to state the dependencies between attributes, and the compiler computes the values from those dependencies, which saves a lot of boilerplate code and reduces the effort of writing configurations.
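To make the circular-reference restriction concrete, here is a minimal sketch (the schema name `Loop` is made up for illustration) of the kind of mutual dependency the compiler rejects:

```python
schema Loop:
    a: int = b + 1 # `a` depends on `b` ...
    b: int = a - 1 # ... and `b` depends on `a`: circular reference, rejected at compile time
```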
- -##### Schema Context - -We can define the context of the schema to manage the attributes of the schema, and we can write schema parameters, temporary variables and expressions directly in the schema: - -```python -schema Person[_name: str]: # define a schema argument - name: str = _name # define a schema attribute - age: int = 10 # define a schema attribute with default value - hands: [int] = [i for i in [1, 2, 3]] # define a for statement -``` - -##### Validation - -In addition to using **static typing** (the type annotation) and **immutability** in KCL schema mentioned earlier to ensure code stability, a bunch of validation rules are supported in a simple **check** block (KCL supports almost all authentication capabilities of [OpenAPI](https://www.openapis.org/)): - -```python -import regex - -schema Sample: - foo: str - bar: int - fooList: [str] - - check: - bar > 0 # minimum, also support the exclusive case - bar < 100 # maximum, also support the exclusive case - len(fooList) > 0 # min length, also support exclusive case - len(fooList) < 100 # max length, also support exclusive case - regex.match(foo, "^The.*Foo$") # regex match - isunique(fooList) # unique - bar in range(100) # range - bar in [2, 4, 6, 8] # enum - multiplyof(bar, 2) # multipleOf -``` - -With the schema, all instances will be validated at compile time - -```python -# Ok -goodSample = Sample { - foo = "The Foo" - bar = 2 - fooList = ["foo0", "foo1"] -} - -# Error: validation failure: Check failed on check conditions: bar < 100. -badSample = Sample { - foo = "The Foo" - bar = 123 - fooList = ["foo0", "foo1"] -} -``` - -In addition, we can use **and**, **or**, **if** to compose more complex conditional check logic: - -```python -schema Sample: - bar: int - foo: str - doCheck: bool - - check: - regex.match(foo, "^The.*Foo$") and bar in [2, 4, 6, 8] if doCheck -``` - -In order to ensure that all check rules can play their corresponding roles well, we can test the rationality and correctness of different data combinations by writing KCL test cases, and run all test cases through the kcl test tool. - -##### Documents - -Usually after we write the schema model, we will write documentation comments for the schema, which can be completed by using a three-quoted string as follows: - -```python -schema Server: - """Server is the common user interface for long-running - services adopting the best practice of Kubernetes. - - Attributes - ---------- - workloadType : str, default is Deployment - Use this attribute to specify which kind of long-running service you want. - Valid values: Deployment, CafeDeployment. - See also: kusion_models/core/v1/workload_metadata.k. - name : str, default is None - A Server-level attribute. - The name of the long-running service. - See also: kusion_models/core/v1/metadata.k. - labels : {str:str}, optional, default is None - A Server-level attribute. - The labels of the long-running service. - See also: kusion_models/core/v1/metadata.k. 
- - Examples - ---------------------- - myCustomApp = AppConfiguration { - name = "componentName" - } - """ - workloadType: str = "Deployment" - name: str - labels?: {str:str} -``` - -##### Config - -Suppose we have the following schema definition: - -```python -schema Person: - firstName: str - lastName: str -``` - -A config could be defined with a JSON-like expression: - -```python -person = Person { - firstName = "firstName" - lastName = "lastName" -} -``` - -At the same time, the schema adheres to strict attribute definitions, -and configuring undefined attributes will trigger a compilation error. - -```python -person = Person { - firstName = "firstName" - lastName = "lastName" - fullName = "fullName" # Error: Cannot add member 'fullName' to schema 'Person', 'fullName' is not defined in schema 'Person' -} -``` - -We can use `if expressions` to dynamically add elements to the schema config, elements that meet the conditions are added to the schema config, and elements that do not meet the conditions are ignored. Besides, the **config selector expressions** can be used to init a schema instance. - -```python -schema Base: - count: int - value: str - -schema Person: - base: Base - labels: {str:str} - name?: str - -env = "prod" - -person1 = Person { - base.count = 2 # Config selector expression - base.value = "value" # A schema variable in schema can use selector expressions - labels.key = "value" # A dict variable in schema can use selector expressions -} - -person2 = Person { - base = { - count = 1 - value = "value" - } - labels.key = "value" - if env == "prod": - labels.env = env - else: - labels.env = "other" -} -``` - -The output YAML is - -```yaml -person1: - base: - count: 2 - value: value - labels: - key: value -person2: - base: - count: 1 - value: value - labels: - key: value - env: prod -``` - -When we instantiate a schema without config parameters, we can generate schema instances in the following three forms: - -```python -schema Data: - id: int = 1 - -data1 = Data {} -data2 = Data() {} -data3 = Data() -``` - -In addition to using a schema type to instantiate a schema, we can also use a schema instance to get a new instance using the config expression. - -```python -schema Config: - id: int - values: [int] - -configOrigin = Config { - id = 1 - values = [0, 1] -} -configNew = configOrigin { - id = 2 - values += [2, 3] -} -``` - -The output is - -```yaml -configOrigin: - id: 1 - values: - - 0 - - 1 -configNew: - id: 2 - values: - - 0 - - 1 - - 2 - - 3 -``` - -In addition, schema attribute default values can be modified by schema config. - -```python -schema Person: - age: int = 1 - name: str = "Alice" - - age = 2 # Error, can't change the default value of the attribute `age` in the schema context - -person = Person { - age = 3 # Ok, can change the default value of the attribute `age` in the schema config -} -``` - -#### Advanced - -##### Protocol & Mixin - -In addition to schema, an additional type definition method `protocol` is provided in KCL, and its properties are as follows: - -- In a protocol, only attributes and their types can be defined, complex logic and check expressions cannot be written, and mixins cannot be used. -- A protocol can only constrain properties that do not start with `_`. -- A protocol can only inherit or refer to other protocols, but cannot inherit or refer to other schemas. 
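As a minimal illustration of these restrictions (the protocol name and attributes below are hypothetical), a protocol body contains only attribute declarations and their types:

```python
protocol Workload:
    replicas: int
    image: str
    labels?: {str:str}
    # No check blocks, no mixin statements, and no references to schema types here.
```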
- -Besides, we can declare a complex assembly schema with optional **mixin** support and use **protocol** to add an optional host type to the dynamically inserted **mixin**.: - -```python -schema Person: - mixin [FullNameMixin] - - firstName: str # Required - lastName: str # Required - fullName?: str # Optional -``` - -A fullName mixin which generates a fullName as a simple sample: - -```python -protocol PersonProtocol: - firstName: str - lastName: str - fullName?: str - -mixin FullNameMixin for PersonProtocol: - fullName = "{} {}".format(firstName, lastName) -``` - -Then we can get the schema instance by: - -```python -person = Person { - firstName = "John" - lastName = "Doe" -} -``` - -The output is - -```yaml -person: - firstName: John - lastName: Doe - fullName: John Doe -``` - -Please note that the host type **protocol** can only be used for **mixin** definitions (the suffix name is `Mixin`), otherwise an error will be reported. - -```python -protocol DataProtocol: - data: str - -schema Data for DataProtocol: # Error: only schema mixin can inherit from protocol - x: str = data -``` - -##### Index Signature - -Index signatures can be defined in the KCL schema, and it means that the key-value constraints of the index signature can be used to construct a dict with the schema type, or additional checks can be added to the schema attributes to enhance the KCL type and semantic checks. - -- Use the form `[{attr_alias}: {key_type}]: {value_type}` to define an index signature in the schema, and `{attr_alias}` can be omitted. - -```python -schema Map: - """ - Map is a schema with a key of str type and a value of str type - """ - [str]: str # `{attr_alias}` can be omitted. - -data = Map { - key1 = "value1" - key2 = "value2" -} -``` - -- Mandatory all attributes of the schema key and value types - -```python -schema Person: - name: str - age: int # error, conflicts with the index signature definition `[str]: str` - [str]: str # The values of all attributes of the schema can only be strings -``` - -- Mandatory all attribute key and value types are defined in the schema, which is equivalent to restricting all attribute types except the additional attributes. - -```python -schema Person: - name: str - age: int - [...str]: str # Except for the `name` and `age` attributes, the key type of all other attributes of the schema must be `str`, and the value type must also be `str`. -``` - -- Define the index signature attribute alias and use it with the check block. - -```python -schema Data: - [dataName: str]: str - check: - dataName in ["Alice", "Bob", "John"] - -data = Data { - Alice = "10" - Bob = "12" - Jonn = "8" # Error: Jonn not in ["Alice", "Bob", "John"] -} -``` - -```python -import regex - -schema DataMap: - [attr: str]: str - check: - regex.match(attr, r'[-._a-zA-Z0-9]+') - -data = DataMap { - key1 = "value1" - "foo.bar" = "value2" # check error -} -``` - -##### Inheritance - -Like some other object-oriented languages, KCL provides fundamental but limited object-oriented support, such as **attribute reuse**, **private and public variables**, and **single inheritance**. Besides, KCL does NOT support multiple inheritances for the schema. - -The following is an example of schema inheritance: - -```python -# A person has a first name, a last name and an age. -schema Person: - firstName: str - lastName: str - # The default value of age is 0 - age: int = 0 - -# An employee **is** a person, and has some additional information. 
-schema Employee(Person): - bankCard: int - nationality?: str - -employee = Employee { - firstName = "Bob" - lastName = "Green" - age = 18 - bankCard = 123456 -} -``` - -The output is - -```yaml -employee: - firstName: Bob - lastName: Green - age: 18 - bankCard: 123456 - nationality: null -``` - -Please note that KCL only allows **single inheritance** on schemas. - -In addition, when the schema has an inheritance relationship, the properties of optional attributes are as follows: - -- If the attribute is optional in the base schema, it could be optional or required in the sub-schema. -- If the attribute is required in the base schema, it must be required in the sub-schema. - -```python -schema Person: - bankCard?: int - nationality: str - -schema Employee(Person): - bankCard: int # Valid, both `bankCard: int` and `bankCard?: int` are allowed - nationality?: str # Error, only `nationality: str` is allowed -``` - -##### Schema Function - -Schema map very nicely onto functions; it can have any number of input and output parameters. For example, the Fibonacci function can be written as follows using the recursive schema config: - -```python -schema Fib[n: int]: - n1 = n - 1 - n2 = n - 2 - if n == 0: - value = 0 - elif n == 1: - value = 1 - else: - value = Fib(n1).value + Fib(n2).value - -fib8 = Fib(8).value # 21 -``` - -##### Decorators - -Just like Python, KCL supports the use of decorators on the schema. KCL Decorators dynamically alter the functionality of a schema without having to directly use sub schema or change the source code of the schema being decorated. And like a function call, the decorator supports passing in additional parameters. - -Built-in decorators of schema - -- `@deprecated` - Mark whether a schema or schema attribute is deprecated. The `@deprecated` decorator supports three parameters: - - **version** - string type, indicating the version information. The default value is empty. - - **reason** - string type, indicating the deprecated reason. The default value is empty. - - **strict** - bool type, indicating whether to report an error or warning. The default value is true. If `strict` is `True` and the error is thrown, the program will be interrupted. If `strict` is `False`, a warning will be output and the program will not be interrupted. - -Examples: - -```python -@deprecated -schema ObsoleteSchema: - attr: str - -schema Person: - name: str = "John" - attrs: ObsoleteSchema = { - attr = "value" - } - -person = Person {} # Error: ObsoleteSchema was deprecated -``` - -```python -schema Person: - firstName: str = "John" - lastName: str - @deprecated(version="1.16", reason="use firstName and lastName instead", strict=True) - name: str - -JohnDoe = Person { # Error: name was deprecated since version 1.16, use firstName and lastName instead - name = "deprecated" -} -``` - -Note that the current version of KCL does not yet support user-defined decorators. - -##### Members - -Built-in functions and members of schema - -- instances() - Return the list of existing instances of a schema. 
- -```python -schema Person: - name: str - age: int - -alice = Person { - name = "Alice" - age = 18 -} - -bob = Person { - name = "Bob" - age = 10 -} - -aliceAndBob = Person.instances() # Person is a schema type, instances() is its member method -``` - -The output is - -```yaml -alice: - name: Alice - age: 18 -bob: - name: Bob - age: 10 -aliceAndBob: -- name: Alice - age: 18 -- name: Bob - age: 10 -``` - -### Config Operations - -#### Config Unification - -##### | Operators - -In KCL, we can use the union operator `|` to achieve the merging of configurations, the types supported by the union operator are as follows: - -``` -SchemaInstance | SchemaInstance -SchemaInstance | Dict -Dict | Dict -List | List -``` - -Unioning collection and schema data: - -- Unioning List. Overwrite the list expression on the right side of the operator `|` to the list variable on the left side of the operator one by one according to the **index**. - -```python -_a = [1, 2, 3] -_b = [4, 5, 6, 7] -x = _a | _b # [4, 5, 6, 7] 1 -> 4; 2 -> 5; 3 -> 6; Undefined -> 7 -``` - -Unioning to the specific index or all elements is still under discussion. - -- Unioning Dict. Union the dict expression on the right side of the operator `|` one by one to the dict variable on the left side of the operator according to the **key** - -```python -_a = {key1 = "value1"} -_b = {key1 = "overwrite", key2 = "value2"} -x = _a | _b # {"key1": "overwrite", "key2": "value2"} -``` - -The union of collection and schema is a new one whose attributes are unioning b to a, preserving the order of the attributes of the operands, left before right. - -- Unioning Schema. The union operation for schema is similar to dict. - -Schema union could be done as: - -```python -schema Person: - firstName?: str - lastName?: str - -_a = Person { - firstName = "John" -} -_b = {lastName = "Doe"} -_c = _a | _b # {"firstName": "John", "lastName": "Doe"} -_d = _a | None # {"firstName": "John"} -_e = _a | Undefined # {"firstName": "John"} -_f = None | _a # {"firstName": "John"} -_g = Undefined | _a # {"firstName": "John"} -``` - -Please note that when one of the left and right operands of the union operator is None, the other operand is returned immediately. - -```python -data1 = {key = "value"} | None # {"key": "value"} -data2 = None | [1, 2, 3] # [1, 2, 3] -data3 = None | None # None -``` - -The output is - -```yaml -data1: - key: value -data2: -- 1 -- 2 -- 3 -data3: null -``` - -##### : Operators - -Pattern: `identifier : E` or `identifier : T E` - -The value of the expression `E` with optional type annotation `T` will be unioned into the element value. - -Examples: - -```python -data = { - labels: {key1: "value1"} - # union {key2: "value2"} into the attribute labels. - labels: {key2: "value2"} -} -``` - -Output: - -```yaml -data: - labels: - key1: value1 - key2: value2 -``` - -In addition to using attribute operators on the schema config attributes, variables inside and outside the schema can use attribute operators to perform different operations on the configuration. - -- Using `:` outside the schema - -```python -schema Data: - d1?: int - d2?: int - -schema Config: - data: Data - -# This is one configuration that will be merged. -config: Config { - data.d1 = 1 -} -# This is another configuration that will be merged. 
-config: Config { - data.d2 = 2 -} -``` - -Its equivalent configuration code can be expressed as - -```python -schema Data: - d1?: int - d2?: int - -schema Config: - data: Data - -config: Config { - data.d1 = 1 - data.d2 = 1 -} -``` - -The output is - -```yaml -config: - data: - d1: 1 - d2: 1 -``` - -- Using `:` inside the schema - -```python -schema Data: - d1?: int - d2?: int - -schema Config: - # This is one configuration that will be merged. - data: Data { - d1 = 1 - } - # This is another configuration that will be merged. - data: Data { - d2 = 1 - } - -config: Config {} -``` - -#### Config Override - -##### = Operators - -Pattern: `identifier = E` or `identifier : T = E` - -The value of the expression `E` with optional type annotation `T` will override the attribute value. - -Examples: - -```python -schema Data: - labels: {str:} = {key1 = "value1"} - -data = Data { - # override {key2: "value2"} into the attribute labels of the schema Data. - labels = {key2 = "value2"} -} -``` - -Output: - -```yaml -data: - labels: - key2: value2 -``` - -Note: - -- Especially, we can "delete" its content by overriding the attribute to `Undefined`, such as `{ a = Undefined }`. - -#### Insert - -##### += Operators - -Pattern: `identifier += E` or `identifier : T += E` - -Insert only works for list type `identifier`. - -`E` will be inserted just after the specified index of the list `identifier`, and the following attributes after the index will be automatically shifted. - -Examples: - -```python -schema Data: - labels: {str:} = {key1 = [0]} - -data = Data { - # insert [1] into the attribute labels.key1 of the schema Data. - labels: {key1 += [1]} -} -``` - -Output: - -```yaml -data: - labels: - key1: - - 0 - - 1 -``` - -If no index is specified, the last index will be used. - -#### Notice - -Please note that the calculations of the `=` and `+=` attribute operators of the same attribute are sequential, and the latter ones have a higher priority. - -```python -x = { - a = 1 # 1 -} | { - a = 2 # 1 -> 2 -} | { - a = 3 # 2 -> 3 -} # The final value of attribute `a` is 3 -``` - -Please note that the `:` attribute operator represents an idempotent merge operation, and an error will be thrown when the values that need to be merged conflict. - -Therefore, when we need a configuration to override or add and delete operations, it is best to use the `=` and `+=` operators - -```python -data0 = {id: 1} | {id: 2} # Error:conflicting values between {'id': 2} and {'id': 1} -data1 = {id: 1} | {id = 2} # Ok, the value of `data` is {"id": 2} -``` - -The check rules for `:` operator for KCL value conflicts are as follows: - -- For `None` and `Undefined` variables, they do not conflict with any value. - -```python -data0 = None | {id: 1} # Ok -``` - -- For `int`, `float`, `str` and `bool` types, when their values are different, they are considered as conflicts. - -```python -data0 = 1 | 1 # Ok -data1 = 1 | "s" # Error -``` - -- For list type - - When their lengths are not equal, they are regarded as conflicts. - - When their lengths are equal, as long as there is a conflict in the value of a child element, it is regarded as a conflict. 
- -```python -data0 = [1] | [1] # Ok -data1 = [1, 2] | [1] # Error -``` - -- For dict/schema type - - When the values of the same key conflict, they are regarded as conflicts - -```python -data0 = {id: 1} | {id: 1} # Ok -data1 = {id: 1} | {id: 2} # Error -data1 = {id: 1} | {idAnother: 1} # Ok -``` - -### Rule - -In addition to using the check keyword for verification and writing in the schema, KCL also supports the use of the `rule` keyword to define a set of rules for policy verification - -The KCL rule is the same as the schema/mixin/protocol and it is defined by indentation. We need write a rule per line and we can write if filter conditions and verification failure information for each rule. Different conditions are connected with logic `and` (similar to the way of writing in check block). - -```python -rule SomeRule: - age > 0, "rule check failure message" -``` - -We can call a KCL rule like instantiating a schema: - -```python -age = 1 -name = "Alice" - -rule SomeRule: - age > 0, "rule check failure message" - name == "Alice" - -rule1 = SomeRule() # Rule call -rule2 = SomeRule {} # Rule call -``` - -We can use protocol to implement type checking of rule structure: - -```python -# Schema definition -protocol Service: - clusterIp: str - $type: str - -# Schema definition -protocol Volume: - mountPath: [str] - -# Protocol -protocol SomeProtocol: - id: int - env: {str: any} - services: [Service] - volumes: [Volume] - -rule SomeChecker for SomeProtocol: - id > 0, "id must >0" - - all service in services { - service.clusterIP == "NONE" if service.type == "ClusterIP" - } - - any volume in volumes { - volume.mountPath in ["/home/admin", "/home/myapp"] - } - -# Call rule to check with config parameter -SomeChecker { - id = 1 - env = { - MY_ENV = "MY_ENV_VALUE" - } - services = [ - { - type = "ClusterIP" - clusterIP = "NONE" - } - ] - volumes = [ - { - mountPath = "/home/admin" - } - { - mountPath = "/home/myapp" - } - ] -} -``` - -Please note that the combination of `protocol` and `rule` can separate attributes from their constraint definitions. We can define different rules and protocols in different packages and combine them as needed. This is different from check expressions in schema, which can only be combined with schema attributes. 
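For example, assuming the protocol and rule above were placed in a module named `policy` (a hypothetical package layout), the same checker could be reused from another file:

```python
import policy # hypothetical package that exports SomeProtocol and SomeChecker

# Call the packaged rule against data assembled at the call site.
policy.SomeChecker {
    id = 1
    env = {MY_ENV = "MY_ENV_VALUE"}
    services = [{type = "ClusterIP", clusterIP = "NONE"}]
    volumes = [{mountPath = "/home/admin"}]
}
```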
- -Besides, the following two ways can be used to achieve the multiplexing of different Rules: - -- Inline Call - -```python -weather = "sunny" -day = "wednesday" - -rule IsSunny: - weather == "sunny" - -rule IsWednesday: - day == "wednesday" - -rule Main: - IsSunny() # Rule inline call - IsWednesday() # Rule inline call - -Main() # Rule call -``` - -- Inherit - -```python -weather = "sunny" -day = "wednesday" - -rule IsSunny: - weather == "sunny" - -rule IsWednesday: - day == "wednesday" - -rule Main(IsSunny, IsWednesday): - id == 1 - -Main() -``` - -We can obtain external data or input from the `option` function and the CLI parameter `-D` for verification: - -- A simple example - -```python -schema Day: - day: str - homework: str - -days: [Day] = option("days") - -rule Main: - filter d in days { - d.day not in ["saturday", "sunday"] and d.homework - } - -Main() -``` - -- A complex example - -```python -data = option("data") -input = option("input") - -rule Allow: - UserIsAdmin() - any grant in UserIsGranted() { - input.action == grant.action and input.type == grant.type - } - -rule UserIsAdmin: - any user in data.user_roles[input.user] { - user == "admin" - } - -rule UserIsGranted: - [ - grant - for role in data.user_roles[input.user] - for grant in data.role_grants[role] - ] - -allow = Allow() or False -``` - -Further, the above KCL rule code can be compiled into a target such as WASM and used at runtime. - -### Module - -KCL config files are organized as **modules**. A single KCL file is considered as a module, and a directory is considered as a package, which is a special module. - -The modules in the same package are visible and cross-package references need to be visible through import. - -Code structure: - -``` -. -└── root - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ └── service1.k - └── mixin - └── mixin1.k -``` - -model1.k: - -```python -# schema CatalogItem in model1.k - -schema CatalogItem: - id: int - image: CatalogItemImage # CatalogItemImage is defined in the module of the same package e.g., model2.k in package model - title: str -``` - -service1.k: - -```python -import ..model as model # cross-package references - -schema ImageService: - image: model.CatalogItemImage # CatalogItemImage is imported from another package e.g., model2.k in package model - name: str -``` - -#### Relative Path Import - -We can use the operator `.` to realize the relative path import of KCL entry files. - -main.k: - -```python -import .model1 # Current directory module -import ..service # Parent directory -import ...root # Parent of parent directory - -s = service.ImageService {} -m = root.Schema {} -``` - -#### Absolute Path Import - -The semantics of `import a.b.c.d` is - -1. Search the path `./a/b/c/d` from the current directory. -2. If the current directory search fails, search from the root path `ROOT_PATH/a/b/c/d` - -The definition of the root path `ROOT_PATH` is - -1. Look up the directory corresponding to the `kcl.mod` file from the current directory. -2. If `kcl.mod` is not found, read from the environment variable `KCL_MODULE_ROOT` e.g., `kclvm/lib/*`. - -Code structure: - -``` -. 
-└── root - ├── kcl.mod - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ └── service1.k - └── mixin - └── mixin1.k -``` - -main.k: - -```python -import service # `root package` and `kcl.mod` are in the same directory -import mixin # `root package` and `kcl.mod` are in the same directory - -myModel = model.CatalogItem {} -``` - -Note that for the KCL entry file `main.k`, it cannot be imported into the folder where it is located, otherwise a recursive import error will occur: - -```python -import model # Error: recursively loading -``` - -### Top-Level Argument - -Assume some field need to be passed in dynamically like user input, we can define a top-level argument in a module: - -```python -bankCard = option("bankCard") # Get bankCard through the option function. -``` - -Then we can use the module as below: - -``` -kcl -DbankCard=123 employee.k -``` - -Currently, supported types of top-level argument are number, string, bool, list and dict. - -``` -kcl main.k -D list_key='[1,2,3]' -D dict_key='{"key":"value"}' -``` - -We need to pay attention to the escape of quotation marks `"` and other symbols in the command line - -#### Arguments with Setting Files - -In addition, it also supports inputting a YAML file as top-level arguments. - -```yaml -kcl_options: - - key: key_number - value: 1 - - key: key_dict - value: - innerDictKey: innerDictValue - - key: key_list - value: - - 1 - - 2 - - 3 - - key: bankCard - value: 123 -``` - -``` -kcl -Y setting.yaml employee.k -``` - -In addition, the setting file also supports configuring command-line compilation parameters as follows: - -```yaml -kcl_cli_configs: - files: - - file1.k - - file2.k - disable_none: true - strict_range_check: true - debug: 1 - verbose: 1 - output: ./stdout.golden -kcl_options: - - key: image - value: docker.io/kusion:latest -``` - -KCL CLI -Y parameters also support multi-file configuration, and support separate writing and merging of compilation parameters and option top level arguments parameter configuration. - -``` -kcl -Y compile_setting.yaml option_setting.yaml -``` - -- `compile_setting.yaml` - -```yaml -kcl_cli_configs: - files: - - file1.k - - file2.k - disable_none: true - strict_range_check: true - debug: 1 - verbose: 1 - output: ./stdout.golden -``` - -- `option_setting.yaml` - -```yaml -kcl_options: - - key: image - value: docker.io/kusion:latest -``` - -We can use the following command line to get the meaning of each configuration parameter or see KCL Quick Start - -``` -kcl --help -``` - -#### Option Functions - -We can use the `option` function in the KCL code to get the top-level arguments. - -```python -value = option(key="key", type='str', default="default_value", required=True, help="Set key value") -``` - -Parameters - -- **key**: The argument key. -- **type**: The argument type to be converted. -- **default**: The argument default value when the key-value argument is not provided -- **required**: Report an error when the key-value argument is not provided and required is True. -- **help**: The help message. - -### Multi-file Compilation - -In addition to the above KCL single file execution, we can compile multiple KCL entry files at the same time using the following command: - -``` -kcl main_1.k main_2.k ... 
main_n.k -``` - -main_1.k - -```python -a = 1 -b = 2 -``` - -main_2.k - -```python -c = 3 -d = 4 -``` - -The output is: - -```yaml -a: 1 -b: 2 -c: 3 -d: 4 -``` - -Taking advantage of the **multi-file combination**, we can assemble multiple KCL files without the need to use import management files. Let us see an example of combining **multi-file compilation** and **schema instance**. - -model.k - -```python -schema Model: - name: str - labels?: {str:} - annotations?: {str:} - replicas: int - -_model1 = Model { - name = "model1" - labels.key1 = "value1" - labels.key2 = "value2" - annotations.key = "value" - replicas = 2 -} - -_model2 = Model { - name = "model2" - replicas = 3 -} -``` - -backend.k - -```python -import yaml - -schema Backend: - apiVersion: str = "v1" - kind: str = "Deployment" - metadata: {str:} - spec: {str:} = { - minReadySeconds = 0 - paused = False - progressDeadlineSeconds = 600 - replicas = 1 - revisionHistoryLimit = 10 - selector = {} - } - -_backends = [Backend { - metadata.name = model.name - metadata.labels = model.labels - metadata.annotations = model.annotations - spec.selector.matchLabels: model.labels - spec.replicas = model.replicas -} for model in Model.instances()] # Schema Model is defined in model.k -print("---\n".join([yaml.encode(_b, ignore_private=True) for _b in _backends])) -``` - -The command is - -``` -kcl model.k backend.k -``` - -The output is - -```yaml -apiVersion: v1 -kind: Deployment -metadata: - name: model1 - labels: - key1: value1 - key2: value2 - annotations: - key: value -spec: - minReadySeconds: 0 - paused: false - progressDeadlineSeconds: 600 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - key1: value1 - key2: value2 ---- -apiVersion: v1 -kind: Deployment -metadata: - name: model2 -spec: - minReadySeconds: 0 - paused: false - progressDeadlineSeconds: 600 - replicas: 3 - revisionHistoryLimit: 10 - selector: {} -``` - -### KCL CLI Path Selector - -We can use KCL CLI `-S|--path-selector` parameter to select one or more values out of a KCL model. - -The path selector looks like this: - -`pkg:var.name` - -- Select node by name in the package `pkg` - -`pkg:var.{name1,name2}` - -- Select multiple nodes in the package `pkg` - -`pkg:var.*` - -- Select all nodes at a given level in the package `pkg` - -`pkg:var.[index]` - -- Select the element of the list `var` indexed by `index` in the package `pkg` - -It should be noted that KCL variables ensure global uniqueness through the combination of the package name and variable identifier `pkg:identifier`. Therefore, we need to specify both `pkg` and `identifier`. When the parameter `pkg` is omitted, it means to find the variable from the entry file in the current path. - -#### Examples - -Code structure: - -``` -. -├── kcl.mod -└── main.k - └── pkg - └── model.k -``` - -pkg/model.k: - -```python -schema Person: - name: str - age: int - -var = Person { - name = "Alice" - age = 18 -} -``` - -main.k - -```python -import pkg - -var = pkg.Person { - name = "Bob" - age = 10 -} -``` - -The command is - -``` -kcl main.k -S pkg:var -S :var.name -``` - -The output is - -```yaml -var: - name: Bob ---- -var: - name: Alice - age: 18 -``` - -### KCL CLI Variable Override - -In addition to **Variable Selector**, KCL also allows us to directly modify the values in the configuration model through the KCL CLI `-O|--overrides` parameter. 

**Variable Override** is used in a similar way to [**Variable Selector**](#variable-selector), and the argument consists of three parts: `pkgpath`, `identifier`, and the override `value` (or a trailing `-` to delete).

```
kcl main.k -O override_spec
```

- `override_spec`: a unified representation of the configuration model field to modify and the value to set.

```
override_spec: [[pkgpath] ":"] identifier ("=" value | "-")
```

- `pkgpath`: Indicates the path of the package whose identifier needs to be modified, usually in the form of `a.b.c`. For the main package, `pkgpath` is expressed as `__main__` and can be omitted. If omitted, the main package is assumed.
- `identifier`: Indicates the identifier of the configuration to modify, usually in the form of `a.b.c`.
- `value`: Indicates the new value of the configuration, which can be any legal KCL expression, such as a number or string literal, or a list/dict/schema expression.
- `=`: modify the value of the identifier.
  - When the identifier exists, its value is changed to `value`.
  - When the identifier does not exist, the attribute is added and set to `value`.
- `-`: delete the identifier attribute.
  - When the identifier exists, it is deleted directly.
  - When the identifier does not exist, the configuration is left unchanged.

Note: when `identifier` appears multiple times, all occurrences of `identifier` are modified/deleted.

#### Examples

##### Override Update Sample

KCL code:

```python
schema Person:
    name: str
    age: int

person = Person {
    name = "Alice"
    age = 18
}
```

The command is

```
kcl main.k -O :person.name=\"Bob\" -O :person.age=10
```

The output is

```yaml
person:
  name: Bob
  age: 10
```

Besides, when the KCL CLI `-d` argument is used, the KCL file is modified to the following content at the same time:

```
kcl main.k -O :person.name=\"Bob\" -O :person.age=10 -d
```

```python
schema Person:
    name: str
    age: int

person = Person {
    name = "Bob"
    age = 10
}
```

Another, more complicated example:

```python
schema Person:
    name: str
    age: int
    ids?: [int]

person = Person {
    name = "Alice"
    age = 10
}
```

The command is

```
kcl main.k -O :person.ids=\[1,2\]
```

The output is

```yaml
person:
  name: Alice
  age: 10
  ids:
    - 1
    - 2
```

##### Override Delete Sample

KCL code:

```python
schema Config:
    x?: int = 1
    y?: str = "s"

config = Config {
    x = 2
}
```

The command is

```
kcl main.k -O config.x-
```

The output is

```yaml
config:
  x: 1
  y: s
```

### Summary

This page summarizes the commonly used features of the KCL language. As a new language, KCL will gradually add features according to the requirements of configuration scenarios.
- -For more information, please try further resources: - -- KCL codelabs -- KCL language specification -- KCL OpenAPI specification diff --git a/docs/reference/lang/model/_category_.json b/docs/reference/lang/model/_category_.json deleted file mode 100644 index 445ad048..00000000 --- a/docs/reference/lang/model/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": " System Module", - "position": 2 -} diff --git a/docs/reference/lang/model/base64.md b/docs/reference/lang/model/base64.md deleted file mode 100644 index 40773485..00000000 --- a/docs/reference/lang/model/base64.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "base64" -linkTitle: "base64" -type: "docs" -description: base64 system module - base64 encode and decode function -weight: 100 ---- -## encode - -`encode(value: str, encoding: str = "utf-8") -> str` - -Encode the string `value` using the codec registered for encoding. - -## decode - -`decode(value: str, encoding: str = "utf-8") -> str` - -Decode the string `value` using the codec registered for encoding. diff --git a/docs/reference/lang/model/builtin.md b/docs/reference/lang/model/builtin.md deleted file mode 100644 index 19bf3bed..00000000 --- a/docs/reference/lang/model/builtin.md +++ /dev/null @@ -1,392 +0,0 @@ ---- -title: "builtin" -sidebar_position: 1 ---- -KCL provides a list of built-in functions that are automatically loaded and can be used directly without providing any module name. For example, `print` is a function provided by a widely used built-in module. - -## Type Conversion Functions - -KCL's `bool`, `int`, `float`, `str`, `list`, `dict` and other types have built-in conversion functions of the same name. Among them, `int` can not only be used to truncate floating-point numbers, but also can be used to convert strings to integers (decimal when parsing, other values can also be specified). - -The following are common uses of type-related functions: - -```py -b1 = bool(1) # true -b2 = bool(1.5) # true -b3 = bool("true") # true -b4 = bool("") # false -b5 = bool([]) # false -b6 = bool({}) # false - -i1 = int("11") # 11 -i2 = int("11", base=8) # 9 -i3 = int("11", base=2) # 3 - -f1 = float(1) # 1.0 -f2 = float("1.5") # 1.5 - -s1 = str(1) # 1 - -l1 = list([1, 2, 3]) -``` - -## print - -`print(*args:any, end:str='\n')` - -The built-in print function, which provides different types of variable parameter printing, adds a newline at the end by default. The following are common usages: - -```python -print("hello KCL") -print() -print(None, end=':') -print(None) -print(True) -print(False) -print(123) -print(123.0) -print('abc ${123}') -print("abc ${456}") -print([1,'a', True]) -print(1,'a', True) -print({}) -print({a: 123}) -``` - -The output is: - -```shell -hello KCL - -None:None -True -False -123 -123.0 -abc 123 -abc 456 -[1, 'a', True] -1 a True -{} -{'a': 123} -``` - -If you do not want the default newline, you can re-specify the ending string with the `end=''` named parameter. - -```python -print("Hello KCL", end='') -``` - -## multiplyof - -`multiplyof(a:int, b:int) -> bool` - -Check whether the integer `a` is an integer multiple of `b`, and return a boolean value: - -```python -print(multiplyof(2, 1)) # True -print(multiplyof(1, 2)) # False -print(multiplyof(0, 1)) # True -print(multiplyof(0, 2)) # True -print(multiplyof(1, 0)) # Error -``` - -`0` is a multiple of any number. But `b` cannot be `0`, otherwise an exception will be thrown. 
-
-## isunique
-
-`isunique(list: [any]) -> bool`
-
-Check whether there are duplicate elements in a list, and return a boolean value:
-
-```python
-print(isunique([])) # True
-print(isunique([1])) # True
-print(isunique([1, 2])) # True
-
-print(isunique([1, 1])) # False
-print(isunique([1, 1.0])) # False
-print(isunique([1.1, 1.1])) # False
-
-print(isunique(['abc', "abc"])) # False
-print(isunique(['abc', "a${'bc'}"])) # False
-```
-
-It should be noted that integers and floating-point numbers are compared by value, ignoring the type difference.
-
-## len
-
-`len(x: str | [any] | {:}) -> int`
-
-Return the length of strings, lists, and dicts:
-
-```python
-print(len([])) # 0
-print(len({})) # 0
-
-print(len([1])) # 1
-print(len({abc:123})) # 1
-
-print(len("abc")) # 3
-```
-
-Note: Calculating lengths on `schema` objects is not supported.
-
-## abs
-
-`abs(x: number) -> number`
-
-Calculate the absolute value of `x`.
-
-## all_true
-
-`all_true(x:str|[]|{:}) -> bool`
-
-Check whether all elements of a list or dict are true; the usage is as follows:
-
-```python
-print(all_true([])) # True
-print(all_true({})) # True
-
-print(all_true([True])) # True
-print(all_true([1])) # True
-
-print(all_true([True, False])) # False
-print(all_true([True, None])) # False
-```
-
-It returns true when the input is empty.
-
-## any_true
-
-`any_true(x:str|[]|{:}) -> bool`
-
-Check whether at least one element in the iterable object is true; the usage is as follows:
-
-```python
-print(any_true([])) # False
-print(any_true([1])) # True
-```
-
-## bin
-
-`bin(x:number) -> str`
-
-Return a string with the binary representation of an integer, used as follows:
-
-```python
-print(bin(8)) # 0b1000
-```
-
-## hex
-
-`hex(number)`
-
-Return a string with the hexadecimal representation of an integer, used as follows:
-
-```python
-print(hex(18)) # 0x12
-```
-
-## oct
-
-`oct(number)`
-
-Return a string with the octal representation of an integer, used as follows:
-
-```python
-print(oct(10)) # 0o12
-```
-
-## option
-
-`option(key:str, type:str='', required=False, default=None, help="") -> any`
-
-Get the value of a top-level command line argument.
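-
-Unlike the other builtins on this page, `option` has no example here; the following is a minimal sketch rather than output copied from a real run. It assumes a file named `main.k` and that the values are passed with the CLI `-D` flag (for example `kcl main.k -D name=kcl -D age=1`), the same key/value form used by the multi-language API examples later in this document:
-
-```python
-name = option("name")           # "kcl" when run with -D name=kcl
-age = option("age", default=1)  # assumed to fall back to 1 when -D age=... is omitted
-```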
- -## ord - -`ord(c) -> int` - -Get the Unicode code point value of the character, the usage is as follows: - -```python -print(ord('A')) # 65 -print(ord('B')) # 66 -print(ord('C')) # 67 -``` - -## sorted - -`sorted(x: []) -> []` - -Returns the sorted list, used as follows: - -```python -_a = [] -_b = [2, 1] - -_c = sorted(_a) -_d = sorted(_b) - -print(_a) # [] -print(_b) # [2, 1] -print(_c) # [] -print(_d) # [1, 2] -``` - -## range - -`range(start:int, end:int, step=1) -> [int]` - -Generates an iterable list, used as follows: - -```python -print(range(1,5)) # [1, 2, 3, 4] -print(range(1,5, 2)) # [1, 3] -print(range(5, 1, -1)) # [5, 4, 3, 2] -``` - -## min - -`min(x:[number]) -> number` - -Returns the smallest element in the list, used as follows: - -```python -print(min([1,2])) # 1 -print(min([2,1])) # 1 -``` - - - -## max - -`max(x:[number]) -> number` - -Returns the largest element in the list, used as follows: - -```python -print(max([1,2])) # 2 -print(max([2,1])) # 2 -``` - -## sum - -`sum(x:[number], init_value=0) -> number` - -Returns the sum of all elements in the list, used as follows: - -``` -print(sum([1,2])) # 3 -print(sum([2,1], 1000)) # 1003 -``` - -## pow - -`pow(x: number, y: number, z: number = None) -> number` - -Computes `x**y`, or `(x**y)%z` if `z` is not empty, supports integer and floating point numbers, used as follows: - -```python -print(pow(2,3)) # 8 -print(pow(2, 3, 5)) # 8%5 == 3 - -print(pow(2, 0.5)) # 1.414 -``` - -## round - -`round(number: int|float, ndigits:int|None) -> float | int` - -Returns the rounded approximation of `number`. If `ndigits` is not `None` returns a float with the specified number of decimal places (cannot be negative), otherwise returns an integer structure, used as follows: - -```python -print(round(1)) # 1 -print(round(1.4)) # 1 -print(round(1.5)) # 2 - -print(round(1.5555, 1)) # 1.6 -print(round(1.5555, 2)) # 1.56 - -print(round(1.5555)) # 2 -print(round(1.5555, 0)) # 2.0 -``` - -It should be noted that the difference between `ndigits` being `None` and `0` is that the prefix returns `int` type, the latter returns `float` type. - -## typeof - -`typeof(x: any, full_name: bool = False) -> str` - -Output the type of `x` at runtime. 
When the `full_name` parameter is set to `True`, the package prefix of the form `pkg.schema` will be returned, used as follows:
-
-```python
-import sub as pkg
-
-_a = 1
-
-t1 = typeof(_a)
-t2 = typeof("abc")
-
-schema Person:
-    name?: any
-
-_x1 = Person{}
-t3 = typeof(_x1)
-
-_x2 = pkg.Person{}
-t4 = typeof(_x2)
-t5 = typeof(_x2, full_name=True)
-
-t6 = typeof(_x1, full_name=True)
-
-# Output
-# t1: int
-# t2: str
-# t3: Person
-# t4: Person
-# t5: sub.Person
-# t6: __main__.Person
-```
-
-## zip
-
-`zip(*args: str|list|dict)`
-
-Take iterable objects as parameters, pack their corresponding elements into tuples, and return a list composed of these tuples, used as follows:
-
-```py
-a = zip([0, 1, 2], [3, 4, 5])
-b = zip([0, 1], [3, 4, 5])
-c = zip([0, 1, 2], [3, 4, 5, 6])
-
-# Output
-# a:
-# - - 0
-#   - 3
-# - - 1
-#   - 4
-# - - 2
-#   - 5
-# b:
-# - - 0
-#   - 3
-# - - 1
-#   - 4
-# c:
-# - - 0
-#   - 3
-# - - 1
-#   - 4
-# - - 2
-```
diff --git a/docs/reference/lang/model/crypto.md b/docs/reference/lang/model/crypto.md
deleted file mode 100644
index 557135f9..00000000
--- a/docs/reference/lang/model/crypto.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: "crypto"
-linkTitle: "crypto"
-type: "docs"
-description: crypto system module
-weight: 100
----
-## md5
-
-`md5(value: str, encoding: str = "utf-8") -> str`
-
-Compute the `MD5` hash of the string `value` using the codec registered for encoding.
-
-## sha1
-
-`sha1(value: str, encoding: str = "utf-8") -> str`
-
-Compute the `SHA1` hash of the string `value` using the codec registered for encoding.
-
-## sha224
-
-`sha224(value: str, encoding: str = "utf-8") -> str`
-
-Compute the `SHA224` hash of the string `value` using the codec registered for encoding.
-
-## sha256
-
-`sha256(value: str, encoding: str = "utf-8") -> str`
-
-Compute the `SHA256` hash of the string `value` using the codec registered for encoding.
-
-## sha384
-
-`sha384(value: str, encoding: str = "utf-8") -> str`
-
-Compute the `SHA384` hash of the string `value` using the codec registered for encoding.
-
-## sha512
-
-`sha512(value: str, encoding: str = "utf-8") -> str`
-
-Compute the `SHA512` hash of the string `value` using the codec registered for encoding.
diff --git a/docs/reference/lang/model/datetime.md b/docs/reference/lang/model/datetime.md
deleted file mode 100644
index a1757a28..00000000
--- a/docs/reference/lang/model/datetime.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: "datetime"
-linkTitle: "datetime"
-type: "docs"
-description: datetime system module
-weight: 100
----
-## ticks
-
-`ticks() -> float`
-
-Return the current time in seconds since the Epoch. Fractions of a second may be present if the system clock provides them.
-
-## date
-
-`date() -> str`
-
-Return the `%Y-%m-%d %H:%M:%S` format date.
-
-## now
-
-`now() -> str`
-
-Return the local time, e.g. `'Sat Jun 06 16:26:11 1998'`.
-
-## today
-
-`today() -> str`
-
-Return the `%Y-%m-%d %H:%M:%S.%{ticks}` format date.
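-
-None of the functions above come with an example in this page; a minimal sketch, assuming `datetime` is imported like the other system modules and that the values shown in the comments are only illustrative:
-
-```python
-import datetime
-
-ts = datetime.ticks()   # e.g. 1654851200.0, seconds since the Epoch
-day = datetime.date()   # e.g. "2022-06-10 16:53:20"
-```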
diff --git a/docs/reference/lang/model/index.md b/docs/reference/lang/model/index.md deleted file mode 100644 index 5e509fdd..00000000 --- a/docs/reference/lang/model/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# System Module - -KCL provides engineering extensibility through system modules, user modules and plug-in modules. This section describes the basic concepts of system modules. Plugin modules refer to [plugin system](/docs/reference/lang/plugin). diff --git a/docs/reference/lang/model/json.md b/docs/reference/lang/model/json.md deleted file mode 100644 index 5edc603e..00000000 --- a/docs/reference/lang/model/json.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: "json" -linkTitle: "json" -type: "docs" -description: json system module -weight: 100 ---- -## encode - -``` -encode( - data: any, - sort_keys: bool = False, - indent: int = None, - ignore_private: bool = False, - ignore_none: bool = False -) -> str -``` - -Serialize a KCL object `data` to a JSON formatted str. - -## decode - -`decode(value: str) -> any` - -Deserialize `value` (a string instance containing a JSON document) to a KCL object. - -## dump_to_file - -``` -dump_to_file( - data: any, - filename: str, - ignore_private: bool = False, - ignore_none: bool = False -) -> None -``` - -Serialize a KCL object `data` to a JSON formatted str and write it into the file `filename`. diff --git a/docs/reference/lang/model/math.md b/docs/reference/lang/model/math.md deleted file mode 100644 index e8f850bd..00000000 --- a/docs/reference/lang/model/math.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "math" -linkTitle: "math" -type: "docs" -description: math system module -weight: 100 ---- -## ceil - -`ceil(x) -> int` - -Return the ceiling of x as an Integral. This is the smallest integer >= x. - -## factorial - -`factorial(x) -> int` - -Return x!. Raise a error if x is negative or non-integral. - -## floor - -`floor(x) -> int` - -Return the floor of x as an Integral. This is the largest integer <= x. - -## gcd - -`gcd(a: int, b: int) -> int` - -Return the greatest common divisor of x and y - -## isfinite - -`isfinite(x) -> bool` - -Return True if x is neither an infinity nor a NaN, and False otherwise. - -## isinf - -`isinf(x) -> bool` - -Return True if x is a positive or negative infinity, and False otherwise. - -## isnan - -`isnan(x) -> bool` - -Return True if x is a NaN (not a number), and False otherwise. - -## modf - -`modf(x) -> Listfloat, float]` - -Return the fractional and integer parts of x. Both results carry the sign of x and are floats. - -## exp - -`exp(x) -> float` - -Return e raised to the power of x. - -## expm1 - -`expm1(x) -> float` - -Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x. - -## log - -`log(x) -> float` - -Return the logarithm of x to the base e. - -## log1p - -`log1p(x) -> float` - -Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero. - -## log2 - -`log2(x) -> float` -Return the base 2 logarithm of x. - -## log10 - -`log10(x) -> float` - -Return the base 10 logarithm of x. - -## pow - -`pow(x, y) -> float` - -Return x**y (x to the power of y). - -## sqrt - -`sqrt(x) -> float` - -Return the square root of x. 
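-
-The system module overview notes that these functions become available after importing `math`; a minimal sketch (the results follow directly from the definitions above):
-
-```python
-import math
-
-a = math.ceil(1.1)    # 2
-b = math.floor(1.9)   # 1
-c = math.log10(100)   # 2.0
-d = math.sqrt(16)     # 4.0
-```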
diff --git a/docs/reference/lang/model/net.md b/docs/reference/lang/model/net.md deleted file mode 100644 index 877e2134..00000000 --- a/docs/reference/lang/model/net.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "net" -linkTitle: "net" -type: "docs" -description: net system module -weight: 100 ---- -## split_host_port - -`split_host_port(ip_end_point: str) -> List[str]` - -Split the 'host' and 'port' from the ip end point. - -## join_host_port - -`join_host_port(host, port) -> str` - -Merge the 'host' and 'port'. - -## fqdn - -`fqdn(name: str = '') -> str` - -Return Fully Qualified Domain Name (FQDN). - -## parse_IP - -`parse_IP(ip) -> str` - -Parse 'ip' to a real IP address - -## to_IP4 - -`to_IP4(ip) -> str` - -Get the IP4 form of 'ip'. - -## to_IP16 - -`to_IP16(ip) -> int` - -Get the IP16 form of 'ip'. - -## IP_string - -`IP_string(ip: str | int) -> str` - -Get the IP string. - -## is_IPv4 - -`is_IPv4(ip: str) -> bool` - -Whether 'ip' is a IPv4 one. - -## is_IP - -`is_IP(ip: str) -> bool` - -Whether ip is a valid ip address. - -## is_loopback_IP - -`is_loopback_IP(ip: str) -> bool` - -Whether 'ip' is a loopback one. - -## is_multicast_IP - -`is_multicast_IP(ip: str) -> bool` - -Whether 'ip' is a multicast one. - -## is_interface_local_multicast_IP - -`is_interface_local_multicast_IP(ip: str) -> bool` - -Whether 'ip' is a interface, local and multicast one. - -## is_link_local_multicast_IP - -`is_link_local_multicast_IP(ip: str) -> bool` - -Whether 'ip' is a link local and multicast one. - -## is_link_local_unicast_IP - -`is_link_local_unicast_IP(ip: str) -> bool` - -Whether 'ip' is a link local and unicast one. - -## is_global_unicast_IP - -`is_global_unicast_IP(ip: str) -> bool` - -Whether 'ip' is a global and unicast one. - -## is_unspecified_IP - -`is_unspecified_IP(ip: str) -> bool` - -Whether 'ip' is a unspecified one. diff --git a/docs/reference/lang/model/overview.md b/docs/reference/lang/model/overview.md deleted file mode 100644 index 36365ac1..00000000 --- a/docs/reference/lang/model/overview.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -sidebar_position: 0 ---- - -# Overview - -KCL provides engineering extensibility through built-in modules, system modules and plug-in modules. - -![](/img/docs/reference/lang/model/kcl-module.png) - -The user code does not need to import functions that directly use builtin functions (such as calculating the length of a list with `len`, obtaining the type of value through `typeof`, etc.), and for basic types such as strings, it also provides some built-in methods (such as converting the case of strings, etc.). - -For relatively complex general logic, it is provided through the system modules. For example, by importing the `math` module, we can use related mathematical functions, and we can use the regular expression by importing the `regex` module. For KCL code, it can also be organized into different user modules. - -In addition, Python and Go can be used to develop plug-ins for KCL through the plugin mechanism. For example, there are the app-context plug-in can be used to obtain the context information of the current application to simplify code writing. 
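-
-A small sketch of the three layers from the KCL side; it reuses the `kcl_plugin.hello` example plugin described in the plugin system documentation and assumes that plugin is installed:
-
-```python
-import math               # system module: must be imported
-import kcl_plugin.hello   # plugin module: imported under the kcl_plugin package
-
-length = len([1, 2, 3])   # builtin function: no import required
-big = math.pow(2, 10)     # 1024.0
-three = hello.add(1, 2)   # provided by the hello plugin
-```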
diff --git a/docs/reference/lang/model/regex.md b/docs/reference/lang/model/regex.md
deleted file mode 100644
index 57ab72b0..00000000
--- a/docs/reference/lang/model/regex.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: "regex"
-linkTitle: "regex"
-type: "docs"
-description: regex system module
-weight: 100
----
-## replace
-
-`replace(string: str, pattern: str, replace: str, count=0) -> str`
-
-Return the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in the string by the replacement.
-
-## match
-
-`match(string: str, pattern: str) -> bool`
-
-Try to apply the pattern at the start of the string, returning a bool value True if any match was found, or False if no match was found.
-
-## compile
-
-`compile(pattern: str) -> bool`
-
-Compile a regular expression pattern, returning a bool value denoting whether the pattern is valid.
-
-## findall
-
-`findall(string: str, pattern: str) -> List[str]`
-
-Return a list of all non-overlapping matches of the pattern in the string.
-
-## search
-
-`search(string: str, pattern: str) -> bool`
-
-Scan through the string looking for a match to the pattern, returning a bool value True if any match was found, or False if no match was found.
-
-## split
-
-`split(string: str, pattern: str, maxsplit=0) -> List[str]`
-
-Split the string by the occurrences of the pattern, returning a list of the resulting substrings; when `maxsplit` is nonzero, at most `maxsplit` splits are performed.
diff --git a/docs/reference/lang/model/units.md b/docs/reference/lang/model/units.md
deleted file mode 100644
index 416d5892..00000000
--- a/docs/reference/lang/model/units.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: "units"
-linkTitle: "units"
-type: "docs"
-description: units system module - Unit handlers
-weight: 100
----
-## Constants
-
-- Fixed point unit constants: `n`, `u`, `m`, `k`, `K`, `G`, `T` and `P`.
-- Power of 2 unit constants: `Ki`, `Mi`, `Gi`, `Ti` and `Pi`.
-
-## Functions
-
-- `to_n(num: int) -> str`
-  Int literal to string with `n` suffix
-- `to_u(num: int) -> str`
-  Int literal to string with `u` suffix
-- `to_m(num: int) -> str`
-  Int literal to string with `m` suffix
-- `to_K(num: int) -> str`
-  Int literal to string with `K` suffix
-- `to_M(num: int) -> str`
-  Int literal to string with `M` suffix
-- `to_G(num: int) -> str`
-  Int literal to string with `G` suffix
-- `to_T(num: int) -> str`
-  Int literal to string with `T` suffix
-- `to_P(num: int) -> str`
-  Int literal to string with `P` suffix
-- `to_Ki(num: int) -> str`
-  Int literal to string with `Ki` suffix
-- `to_Mi(num: int) -> str`
-  Int literal to string with `Mi` suffix
-- `to_Gi(num: int) -> str`
-  Int literal to string with `Gi` suffix
-- `to_Ti(num: int) -> str`
-  Int literal to string with `Ti` suffix
-- `to_Pi(num: int) -> str`
-  Int literal to string with `Pi` suffix
diff --git a/docs/reference/lang/model/yaml.md b/docs/reference/lang/model/yaml.md
deleted file mode 100644
index 4b2c43a4..00000000
--- a/docs/reference/lang/model/yaml.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: "yaml"
-linkTitle: "yaml"
-type: "docs"
-description: yaml encode and decode function
-weight: 300
----
-## encode
-
-```
-encode(
-    data: any,
-    sort_keys: bool = False,
-    ignore_private: bool = False,
-    ignore_none: bool = False
-) -> str
-```
-
-Serialize a KCL object `data` to a YAML formatted str.
-
-## decode
-
-`decode(value: str) -> any`
-
-Deserialize `value` (a string instance containing a YAML document) to a KCL object.
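-
-A minimal decode sketch (the inline YAML string and variable names are illustrative, not taken from the original page):
-
-```python
-import yaml
-
-_conf = yaml.decode("{name: kcl, replicas: 2}")
-name = _conf["name"]          # "kcl"
-replicas = _conf["replicas"]  # 2
-```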
- -## dump_to_file - -``` -dump_to_file( - data: any, - filename: str, - ignore_private: bool = False, - ignore_none: bool = False -) -> None -``` - -Serialize a KCL object `data` to a YAML formatted str and write it into the file `filename`. diff --git a/docs/reference/lang/plugin/_category_.json b/docs/reference/lang/plugin/_category_.json deleted file mode 100644 index e77dfb1c..00000000 --- a/docs/reference/lang/plugin/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Plugin System", - "position": 4 -} diff --git a/docs/reference/lang/plugin/index.md b/docs/reference/lang/plugin/index.md deleted file mode 100644 index df299e93..00000000 --- a/docs/reference/lang/plugin/index.md +++ /dev/null @@ -1 +0,0 @@ -# Plugin System diff --git a/docs/reference/lang/plugin/overview.md b/docs/reference/lang/plugin/overview.md deleted file mode 100644 index 7af66005..00000000 --- a/docs/reference/lang/plugin/overview.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Introduction - -KCL provides plugin support through a plugin agent and auxiliary command line tools, and the KCL plugin framework supports different general-purpose languages to develop plugins. Here we take Python as an example to briefly explain the use of plugins. - -KCL plugin Git repository: [https://github.com/KusionStack/kcl-plugin](https://github.com/KusionStack/kcl-plugin) - -## 1. Hello Plugin - -KCL plugins are installed in the `plugins` subdirectory of KCLVM (usually installed in the `$HOME/.kusion/kclvm/plugins` directory), or set through the `$KCL_PLUGINS_ROOT` environment variable. KCL plugins are managed in the Git repository: [https://github.com/KusionStack/kcl-plugin](https://github.com/KusionStack/kcl-plugin), we can clone the repository for development. - -Enter the `kcl-plugin info` command to view the plugin directory (replace `/Users/kcl_user` with the local `$HOME` path): - -```shell -$ kcl-plugin info -# plugin_root: /Users/kcl_user/.kusion/kclvm/plugins -``` - -View the list of plugins with the `kcl-plugin list` subcommand: - -```shell -$ kcl-plugin list -hello: hello doc - 0.0.1 -``` - -Where `hello` is an example builtin plugin (do not modify the plugin). - -In KCL code, the `hello` plugin can be imported via `import kcl_plugin.hello`. `main.k` code is as follows: - -```python -import kcl_plugin.hello - -name = "kcl" -three = hello.add(1,2) -``` - -The output result is - -```shell -$ kcl main.k -name: kcl -three: 3 -``` - -## 2. `kcl-plugin` Command - -`kcl-plugin` is a plugin helper command line tool, the command line help is as follows: - -```shell -$ kcl-plugin -usage: kcl-plugin [-h] {list,info,init,gendoc,test} ... -positional arguments: - {list,info,init,gendoc,test} - kcl plugin sub commands - list list all plugins - info show plugin document - init init a new plugin - gendoc gen all plugins document - test test plugin -optional arguments: - -h, --help show this help message and exit -``` - -- The `list` subcommand is used to view the list of plugins. -- The `info` subcommand is used to view the plugin directory and information about each plugin. -- The `init` subcommand is used to initialize new plugins. -- The `gendoc` subcommand is used to update the API documentation of all plugins. -- The `test` subcommand is used to test specified plugins. - -## 3. 
Plugin Information and Documentation - -Enter `kcl-plugin info hello` to view the `hello` plugin information: - -```shell -$ kcl-plugin info hello -{ - "name": "hello", - "describe": "hello doc", - "long_describe": "long describe", - "version": "0.0.1", - "method": { - "add": "add two numbers, and return result", - "foo": "no doc", - "list_append": "no doc", - "say_hello": "no doc", - "tolower": "no doc", - "update_dict": "no doc" - } -} -``` - -The information of the plugin mainly includes the name and version information of the plugin, and the function information provided by the plugin. This information is consistent with the automatically generated `api.md` file in the plugin directory (regenerate the `api.md` file for all plugins via `kcl-plugin gendoc` when the plugin API document changes). - -## 4. Plugin Directory Structure - -The directory structure of the plugin is as follows (replace `/Users/kcl_user` with the local `$HOME` path): - -```shell -$ tree /Users/kcl_user/.kusion/kclvm/plugins/ -/Users/kcl_user/.kusion/kclvm/plugins/ -├── _examples -├── _test -└── hello - ├── api.md - ├── plugin.py - └── plugin_test.py -$ -``` - -The `_examples` directory is the sample code of the plugin, the `_test` directory is the KCL test code of the plugin, and the other directories starting with letters are ordinary plugins. The content of the plugin is as follows: - -```shell -$ cat ./hello/plugin.py -# Copyright 2020 The KCL Authors. All rights reserved. -INFO = { - 'name': 'hello', - 'describe': 'hello doc', - 'long_describe': 'long describe', - 'version': '0.0.1', -} -def add(a: int, b: int) -> int: - """add two numbers, and return result""" - return a + b -... -``` - -Where `INFO` specifies the name of the plugin, a brief description, a detailed description and version information. And all the functions whose names start with letters are the functions provided by the plugin, so the `add` function can be called directly in KCL. - -> Note: KCL plugins are implemented in an independent pure Python code file, and plugins cannot directly call each other. - -## 5. Create Plugin - -An plugin can be created with the `kcl-plugin init` command: - -``` -$ kcl-plugin init hi -$ kcl-plugin list -hello: hello doc - 0.0.1 -hi: hi doc - 0.0.1 -``` - -The `kcl-plugin init` command will construct a new plugin from the built-in template, and then we can view the created plugin information with the `kcl-plugin list` command. - -## 6. Remove Plugin - -KCL plugins are located in the `plugins` subdirectory of KCLVM (usually installed in the `$HOME/.kusion/kclvm/plugins` directory). -We can query the plugin installation directory with the command `kcl-plugin info`. - -```shell -$ kcl-plugin info -/Users/kcl_user/.kusion/kclvm/plugins/ -$ tree /Users/kcl_user/.kusion/kclvm/plugins/ -/Users/kcl_user/.kusion/kclvm/plugins/ -├── _examples -├── _test -└── hello -- Delete this directory to delete the hello plugin - ├── api.md - ├── plugin.py - └── plugin_test.py -$ -``` - -## 7. Test Plugin - -There is a `plugin_test.py` file in the plugin directory, which is the unit test file of the plugin (based on the `pytest` testing framework). Also placed in the `_test` directory are plugin integration tests for KCL files. The `plugin_test.py` unit test is required, and the KCL integration tests in the `_test` directory can be added as needed. 
- -Unit tests for plugins can be executed via `kcl-plugin test`: - -```shell -$ kcl-plugin test hello -============================= test session starts ============================== -platform darwin -- Python 3.7.6+, pytest-5.3.5, py-1.9.0, pluggy-0.13.1 -rootdir: /Users/kcl_user -collected 5 items -.kusion/kclvm/plugins/hello/plugin_test.py ..... [100%] -============================== 5 passed in 0.03s =============================== -$ -``` - -Integration tests can be tested by executing the `python3 -m pytest` command in the `_test` directory. diff --git a/docs/reference/lang/plugin/project_context.md b/docs/reference/lang/plugin/project_context.md deleted file mode 100644 index 6171f883..00000000 --- a/docs/reference/lang/plugin/project_context.md +++ /dev/null @@ -1,59 +0,0 @@ -# project_context - -project_context extract base info from project.yaml&stack.yaml - -*version: 0.0.1* - -## `get_project_current_path` - -return the relative path of first file - -Example: - -```py -import kcl_plugin.project_context as ctx - -path = ctx.get_project_current_path() -print(path) -``` - -## `get_project_input_file` - -return compiling file list - -Example: - -```py -import kcl_plugin.project_context as ctx - -input_file = ctx.get_project_input_file() -print(input_file) -``` - -## `get_project_context` - -return the current project context from project.yaml - -Example: - -```py -import kcl_plugin.project_context as ctx - -project = ctx.get_project_context() -# Get project name -print(project?.name) -``` - -## `get_stack_context` - -return the current stack context from stack.yaml - -Example: - -```py -import kcl_plugin.project_context as ctx - -stack = ctx.get_stack_context() -# Get stack name -print(stack?.name) -``` diff --git a/docs/reference/lang/use_case/_category_.json b/docs/reference/lang/use_case/_category_.json deleted file mode 100644 index 49d92292..00000000 --- a/docs/reference/lang/use_case/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Use Case", - "position": 5 -} diff --git a/docs/reference/lang/use_case/index.md b/docs/reference/lang/use_case/index.md deleted file mode 100644 index 1b2042db..00000000 --- a/docs/reference/lang/use_case/index.md +++ /dev/null @@ -1 +0,0 @@ -# Use Case diff --git a/docs/reference/lang/use_case/validation.md b/docs/reference/lang/use_case/validation.md deleted file mode 100644 index cd5fe3fd..00000000 --- a/docs/reference/lang/use_case/validation.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -sidebar_position: 1 ---- -# KCL Validation - -In addition to using KCL code to generate configuration formats such as JSON/YAML, KCL also supports format validation of JSON/YAML data. As a configuration language, KCL covers almost all features of [OpenAPI](https://www.openapis.org/). - -In KCL, a structure definition can be used to validate configuration data. At the same time, it supports user-defined constraint rules through the check block, and writes validation expressions in the schema to check and validate the attributes defined by the schema. It is very clear and simple to check whether the input JSON/YAML satisfies the corresponding schema structure definition and constraints. - -## Introduction - -In the schema we can use the `check` keyword to write the validation rules of every schema attribute. Each line in the check block corresponds to a conditional expression. When the condition is satisfied, the validation is successful. 
The conditional expression can be followed by `, "check error message"` to indicate the message to be displayed when the check fails. Here is an example of a schema with constraint expressions. - -```python -import regex - -schema Sample: - foo: str # Required, cannot be None/Undefined, and the type must be str - bar: int # Required, cannot be None/Undefined, and the type must be int - fooList: [int] # Required, cannot be None/Undefined, and the type must be int list - color: "Red" | "Yellow" | "Blue" # Required, literal union type, and must be one of "Red", "Yellow", "Blue". - id?: int # Optional, can be None/Undefined, the type must be int - - check: - 0 <= bar < 100 # bar must be greater than or equal to 0 and less than 100 - 0 < len(fooList) < 100 # fooList cannot be None/Undefined, and the length must be greater than 0 and less than 100 - regex.match(foo, "^The.*Foo$") # regular expression matching - bar in range(100) # bar can only range from 1 to 99 - bar in [2, 4, 6, 8] # bar can only take 2, 4, 6, 8 - bar % 2 == 0 # bar must be a multiple of 2 - all foo in fooList { - foo > 1 - } # All elements in fooList must be greater than 1 - any foo in fooList { - foo > 10 - } # At least one element in fooList must be greater than 10 - abs(id) > 10 if id # check expression with if guard, when id is not empty, the absolute value of id must be greater than 10 -``` - -To sum up, the validation kinds supported in KCL schema are: - -| Kind | Method | -| ----------------- | ----------------------------------------------------------------------------------------- | -| Range | Using comparison operators such as `<`, `>` | -| Regex | Using methods such as `match` from the `regex` system module | -| Length | Using the `len` built-in function to get the length of a variable of type `list/dict/str` | -| Enum | Using literal union types | -| Optional/Required | Using optional/required attributes of schema | -| Condition | Using the check if conditional expression | - -In addition, KCL provides a corresponding [validation tool](/reference/cli/kcl/vet.md) to directly validate JSON/YAML data. - -## Future Plan - -The improvement of KCL validation capabilities will gradually focus on the "static" aspect, that is, at compile time, combined with the ability of formal validation, it can directly analyze whether the data meets the constraints, whether the constraints conflict with each other, etc., and can be exposed in real time through the IDE. 
diff --git a/docs/reference/lang/xlang-api/_c-api.md b/docs/reference/lang/xlang-api/_c-api.md deleted file mode 100644 index 454c1ce4..00000000 --- a/docs/reference/lang/xlang-api/_c-api.md +++ /dev/null @@ -1,4 +0,0 @@ -# C API - -- 运行时 C API -- 编译 C API diff --git a/docs/reference/lang/xlang-api/_category_.json b/docs/reference/lang/xlang-api/_category_.json deleted file mode 100644 index c2aafb21..00000000 --- a/docs/reference/lang/xlang-api/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Multi-Language", - "position": 4 -} diff --git a/docs/reference/lang/xlang-api/go-api.md b/docs/reference/lang/xlang-api/go-api.md deleted file mode 100644 index bd0a61e1..00000000 --- a/docs/reference/lang/xlang-api/go-api.md +++ /dev/null @@ -1,568 +0,0 @@ - - -# kclvm - -```go -import "github.com/KusionStack/kclvm-go" -``` - -### KCLVM binding for Go - -``` -┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ -│ kcl files │ │ KCLVM-Go-API │ │ KCLResultList │ -│ ┌───────────┐ │ │ │ │ │ -│ │ 1.k │ │ │ │ │ │ -│ └───────────┘ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ ┌───────────┐ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.Get("a.b.c") │ -│ │ 2.k │ │ │ │ Run(path) │ │ │ └───────────┘ │ └───────────────┘ -│ └───────────┘ │────┐ │ └───────────┘ │ │ │ -│ ┌───────────┐ │ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ │ 3.k │ │ │ │ │ │ │ KCLResult │──┼────────▶│x.Get("k", &v) │ -│ └───────────┘ │ │ │ │ │ └───────────┘ │ └───────────────┘ -│ ┌───────────┐ │ ├───▶│ ┌───────────┐ │──────────▶│ │ -│ │setting.yml│ │ │ │ │RunFiles() │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ └───────────┘ │ │ │ └───────────┘ │ │ │ KCLResult │──┼────────▶│x.JSONString() │ -└─────────────────┘ │ │ │ │ └───────────┘ │ └───────────────┘ - │ │ │ │ │ -┌─────────────────┐ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ Options │ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.YAMLString() │ -│WithOptions │ │ │ │MustRun() │ │ │ └───────────┘ │ └───────────────┘ -│WithOverrides │────┘ │ └───────────┘ │ │ │ -│WithWorkDir │ │ │ │ │ -│WithDisableNone │ │ │ │ │ -└─────────────────┘ └─────────────────┘ └─────────────────┘ -``` - -
Example -

- -```go -{ - const k_code = ` -import kcl_plugin.hello - -name = "kcl" -age = 1 - -two = hello.add(1, 1) - -schema Person: - name: str = "kcl" - age: int = 1 - -x0 = Person {} -x1 = Person { - age = 101 -} -` - - yaml := kclvm.MustRun("testdata/main.k", kclvm.WithCode(k_code)).First().YAMLString() - fmt.Println(yaml) - - fmt.Println("----") - - result := kclvm.MustRun("./testdata/main.k").First() - fmt.Println(result.JSONString()) - - fmt.Println("----") - fmt.Println("x0.name:", result.Get("x0.name")) - fmt.Println("x1.age:", result.Get("x1.age")) - - fmt.Println("----") - - var person struct { - Name string - Age int - } - fmt.Printf("person: %+v\n", result.Get("x1", &person)) -} -``` - -

-
- -## Index - -- [func FormatCode(code interface{}) ([]byte, error)](<#func-formatcode>) -- [func FormatPath(path string) (changedPaths []string, err error)](<#func-formatpath>) -- [func InitKclvmRuntime(n int)](<#func-initkclvmruntime>) -- [func LintPath(path string) (results []string, err error)](<#func-lintpath>) -- [func OverrideFile(file string, specs []string) (bool, error)](<#func-overridefile>) -- [func RunPlayground(address string) error](<#func-runplayground>) -- [func ValidateCode(data, code string, opt *ValidateOptions) (ok bool, err error)](<#func-validatecode>) -- [type KCLResult](<#type-kclresult>) - - [func EvalCode(code string) (*KCLResult, error)](<#func-evalcode>) -- [type KCLResultList](<#type-kclresultlist>) - - [func MustRun(path string, opts ...Option) *KCLResultList](<#func-mustrun>) - - [func Run(path string, opts ...Option) (*KCLResultList, error)](<#func-run>) - - [func RunFiles(paths []string, opts ...Option) (*KCLResultList, error)](<#func-runfiles>) -- [type KclType](<#type-kcltype>) - - [func GetSchemaType(file, code, schemaName string) ([]*KclType, error)](<#func-getschematype>) -- [type Option](<#type-option>) - - [func WithCode(codes ...string) Option](<#func-withcode>) - - [func WithDisableNone(disableNone bool) Option](<#func-withdisablenone>) - - [func WithKFilenames(filenames ...string) Option](<#func-withkfilenames>) - - [func WithOptions(key_value_list ...string) Option](<#func-withoptions>) - - [func WithOverrides(override_list ...string) Option](<#func-withoverrides>) - - [func WithPrintOverridesAST(printOverridesAST bool) Option](<#func-withprintoverridesast>) - - [func WithSettings(filename string) Option](<#func-withsettings>) - - [func WithWorkDir(workDir string) Option](<#func-withworkdir>) -- [type ValidateOptions](<#type-validateoptions>) - - -## func [FormatCode]() - -```go -func FormatCode(code interface{}) ([]byte, error) -``` - -FormatCode returns the formatted code\. - -
Example -

- -```go -{ - out, err := kclvm.FormatCode(`a = 1+2`) - if err != nil { - log.Fatal(err) - } - fmt.Println(string(out)) - -} -``` - -#### Output - -``` -a = 1 + 2 -``` - -

-
- -## func [FormatPath]() - -```go -func FormatPath(path string) (changedPaths []string, err error) -``` - -FormatPath formats files from the given path path: if path is \`\.\` or empty string\, all KCL files in current directory will be formatted\, not recursively if path is \`path/file\.k\`\, the specified KCL file will be formatted if path is \`path/to/dir\`\, all KCL files in the specified dir will be formatted\, not recursively if path is \`path/to/dir/\.\.\.\`\, all KCL files in the specified dir will be formatted recursively - -the returned changedPaths are the changed file paths \(relative path\) - -
Example -

- -```go -{ - changedPaths, err := kclvm.FormatPath("testdata/fmt") - if err != nil { - log.Fatal(err) - } - fmt.Println(changedPaths) -} -``` - -

-
- -## func [InitKclvmRuntime]() - -```go -func InitKclvmRuntime(n int) -``` - -InitKclvmRuntime init kclvm process\. - -## func [LintPath]() - -```go -func LintPath(path string) (results []string, err error) -``` - -LintPath lint files from the given path - -
Example -

- -```go -{ - - results, err := kclvm.LintPath("testdata/lint/import.k") - if err != nil { - log.Fatal(err) - } - for _, s := range results { - fmt.Println(s) - } - -} -``` - -#### Output - -``` -Unable to import abc. -a is reimported multiple times. -a imported but unused. -``` - -

-
- -## func [OverrideFile]() - -```go -func OverrideFile(file string, specs []string) (bool, error) -``` - -OverrideFile rewrites a file with override spec file: string\. The File that need to be overridden specs: \[\]string\. List of specs that need to be overridden\. Each spec string satisfies the form: \:\=\ or \:\\- When the pkgpath is '\_\_main\_\_'\, it can be omitted\. - -## func [RunPlayground]() - -```go -func RunPlayground(address string) error -``` - -RunPlayground start KCL playground on given address\. - -
Example -

- -```go -{ - addr := "localhost:2022" - fmt.Printf("listen at http://%s\n", addr) - - kclvm.RunPlayground(addr) -} -``` - -

-
- -## func [ValidateCode]() - -```go -func ValidateCode(data, code string, opt *ValidateOptions) (ok bool, err error) -``` - -ValidateCode validate data match code - -## type [KCLResult]() - -```go -type KCLResult = kcl.KCLResult -``` - -
Example -

- -```go -{ - const k_code = ` -import kcl_plugin.hello - -name = "kcl" -age = 1 - -two = hello.add(1, 1) - -schema Person: - name: str = "kcl" - age: int = 1 - -x0 = Person {name = "kcl-go"} -x1 = Person {age = 101} -` - - result := kclvm.MustRun("testdata/main.k", kclvm.WithCode(k_code)).First() - - fmt.Println("x0.name:", result.Get("x0.name")) - fmt.Println("x1.age:", result.Get("x1.age")) - -} -``` - -#### Output - -``` -x0.name: kcl-go -x1.age: 101 -``` - -

-
- -
Example (Get_struct)

- -```go -{ - const k_code = ` -schema Person: - name: str = "kcl" - age: int = 1 - X: int = 2 - -x = { - "a": Person {age = 101} - "b": 123 -} -` - - result := kclvm.MustRun("testdata/main.k", kclvm.WithCode(k_code)).First() - - var person struct { - Name string - Age int - } - fmt.Printf("person: %+v\n", result.Get("x.a", &person)) - fmt.Printf("person: %+v\n", person) - -} -``` - -#### Output - -``` -person: &{Name:kcl Age:101} -person: {Name:kcl Age:101} -``` - -

-
- -### func [EvalCode]() - -```go -func EvalCode(code string) (*KCLResult, error) -``` - -## type [KCLResultList]() - -```go -type KCLResultList = kcl.KCLResultList -``` - -### func [MustRun]() - -```go -func MustRun(path string, opts ...Option) *KCLResultList -``` - -MustRun is like Run but panics if return any error\. - -
Example -

- -```go -{ - yaml := kclvm.MustRun("testdata/main.k", kclvm.WithCode(`name = "kcl"`)).First().YAMLString() - fmt.Println(yaml) - -} -``` - -#### Output - -``` -name: kcl -``` - -

-
- -
Example (Settings) -

- -```go -{ - yaml := kclvm.MustRun("./testdata/app0/kcl.yaml").First().YAMLString() - fmt.Println(yaml) -} -``` - -

-
- -### func [Run]() - -```go -func Run(path string, opts ...Option) (*KCLResultList, error) -``` - -Run evaluates the KCL program with path and opts\, then returns the object list\. - -
Example (Get Field) -

- -```go -{ - - x, err := kclvm.Run("./testdata/app0/kcl.yaml") - assert(err == nil, err) - - fmt.Println(x.First().Get("deploy_topology.1.zone")) - -} -``` - -#### Output - -``` -RZ24A -``` - -

-
- -### func [RunFiles]() - -```go -func RunFiles(paths []string, opts ...Option) (*KCLResultList, error) -``` - -RunFiles evaluates the KCL program with multi file path and opts\, then returns the object list\. - -
Example -

- -```go -{ - result, _ := kclvm.RunFiles([]string{"./testdata/app0/kcl.yaml"}) - fmt.Println(result.First().YAMLString()) -} -``` - -

-
- -## type [KclType]() - -```go -type KclType = kcl.KclType -``` - -### func [GetSchemaType]() - -```go -func GetSchemaType(file, code, schemaName string) ([]*KclType, error) -``` - -GetSchemaType returns schema types from a kcl file or code\. - -file: string The kcl filename code: string The kcl code string schema\_name: string The schema name got\, when the schema name is empty\, all schemas are returned\. - -## type [Option]() - -```go -type Option = kcl.Option -``` - -### func [WithCode]() - -```go -func WithCode(codes ...string) Option -``` - -WithCode returns a Option which hold a kcl source code list\. - -### func [WithDisableNone]() - -```go -func WithDisableNone(disableNone bool) Option -``` - -WithDisableNone returns a Option which hold a disable none switch\. - -### func [WithKFilenames]() - -```go -func WithKFilenames(filenames ...string) Option -``` - -WithKFilenames returns a Option which hold a filenames list\. - -### func [WithOptions]() - -```go -func WithOptions(key_value_list ...string) Option -``` - -WithOptions returns a Option which hold a key=value pair list for option function\. - -
Example -

- -```go -{ - const code = ` -name = option("name") -age = option("age") -` - x, err := kclvm.Run("hello.k", kclvm.WithCode(code), - kclvm.WithOptions("name=kcl", "age=1"), - ) - if err != nil { - log.Fatal(err) - } - - fmt.Println(x.First().YAMLString()) - -} -``` - -#### Output - -``` -age: 1 -name: kcl -``` - -

-
- -### func [WithOverrides]() - -```go -func WithOverrides(override_list ...string) Option -``` - -WithOverrides returns a Option which hold a override list\. - -### func [WithPrintOverridesAST]() - -```go -func WithPrintOverridesAST(printOverridesAST bool) Option -``` - -WithPrintOverridesAST returns a Option which hold a printOverridesAST switch\. - -### func [WithSettings]() - -```go -func WithSettings(filename string) Option -``` - -WithSettings returns a Option which hold a settings file\. - -### func [WithWorkDir]() - -```go -func WithWorkDir(workDir string) Option -``` - -WithWorkDir returns a Option which hold a work dir\. - -## type [ValidateOptions]() - -```go -type ValidateOptions = validate.ValidateOptions -``` - - - -Generated by [gomarkdoc]() diff --git a/docs/reference/lang/xlang-api/index.md b/docs/reference/lang/xlang-api/index.md deleted file mode 100644 index f6f8e076..00000000 --- a/docs/reference/lang/xlang-api/index.md +++ /dev/null @@ -1 +0,0 @@ -# Multi-Language diff --git a/docs/reference/lang/xlang-api/overview.md b/docs/reference/lang/xlang-api/overview.md deleted file mode 100644 index 58f9df62..00000000 --- a/docs/reference/lang/xlang-api/overview.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Introduction - -The KCL language provides general programming language interfaces such as C/Rust/Go/Python/Java, and the related languages are under development. - -## 1. C/Rust API - -The core of KCL is developed in Rust, and the C language API is exported externally for packaging and integration in high-level languages such as Go/Python/Java. - -## 2. Go API - -Go API is a C-API provided by CGO wrapping KCL, while providing deeper customization features to meet the needs of upper-level tools such as Kusion Engine. - -### 2.1. Abstract Model - -The abstract model of the KCL Go API is as follows: - -``` -┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ -│ kcl files │ │ KCLVM-Go-API │ │ KCLResultList │ -│ ┌───────────┐ │ │ │ │ │ -│ │ 1.k │ │ │ │ │ │ -│ └───────────┘ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ ┌───────────┐ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.Get("a.b.c") │ -│ │ 2.k │ │ │ │ Run(path) │ │ │ └───────────┘ │ └───────────────┘ -│ └───────────┘ │────┐ │ └───────────┘ │ │ │ -│ ┌───────────┐ │ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ │ 3.k │ │ │ │ │ │ │ KCLResult │──┼────────▶│x.Get("k", &v) │ -│ └───────────┘ │ │ │ │ │ └───────────┘ │ └───────────────┘ -│ ┌───────────┐ │ ├───▶│ ┌───────────┐ │──────────▶│ │ -│ │setting.yml│ │ │ │ │RunFiles() │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ └───────────┘ │ │ │ └───────────┘ │ │ │ KCLResult │──┼────────▶│x.JSONString() │ -└─────────────────┘ │ │ │ │ └───────────┘ │ └───────────────┘ - │ │ │ │ │ -┌─────────────────┐ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ Options │ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.YAMLString() │ -│WithOptions │ │ │ │MustRun() │ │ │ └───────────┘ │ └───────────────┘ -│WithOverrides │────┘ │ └───────────┘ │ │ │ -│WithWorkDir │ │ │ │ │ -│WithDisableNone │ │ │ │ │ -└─────────────────┘ └─────────────────┘ └─────────────────┘ -``` - -The input file contains the KCL file and the `setting.yml` configuration file, and `Options` can be used to specify additional parameters and information such as working directory. The "KCLVM-Go-API" part is the provided KCLVM execution function. The execution function executes the KCL program according to the input file and additional parameters, and finally outputs the result of `KCLResultList`. 
`KCLResultList` is a list of `KCLResult`, each `KCLResult` corresponding to a generated configuration file or `map[string]interface{}`. - -### 2.2. Example - -```go -package main - -import ( - "fmt" - - "kusionstack.io/kclvm-go/api/kcl" -) - - -func main() { - const k_code = ` -import kcl_plugin.hello - -name = "kcl" -age = 1 - -two = hello.add(1, 1) - -schema Person: - name: str = "kcl" - age: int = 1 - -x0 = Person{} -x1 = Person{age:101} -` - - result := kcl.MustRun("hello.k", kcl.WithCode(k_code)).First() - fmt.Println(result.YAMLString()) - - fmt.Println("----") - fmt.Println("x0.name:", result.Get("x0.name")) - fmt.Println("x1.age:", result.Get("x1.age")) - - fmt.Println("----") - - var person struct { - Name string - Age int - } - fmt.Printf("person: %+v\n", result.Get("x1", &person)) -} -``` - -Output result: - -```yaml -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl - ----- -x0.name: kcl -x1.age: 101 ----- -person: &{Name:kcl Age:101} -``` - -## 3. REST-API - -The C-API provided by KCL does not have a REST-API. The REST-API is defined by Protobuf and is finally implemented by the upper-layer Go-SDK. - -### 3.1. Start REST Service - -The RestAPI service can be started in the following ways: - -``` -kcl-go rest-server -http=:2021 -``` - -The service can then be requested via the POST protocol: - -```shell -$ curl -X POST http://127.0.0.1:2021/api:protorpc/BuiltinService.Ping --data '{}' -{ - "error": "", - "result": {} -} -``` - -The POST request and the returned JSON data are consistent with the structure defined by Protobuf. - -### 3.2. `BuiltinService` - -Where the `/api:protorpc/BuiltinService.Ping` path represents the `Ping` method of the `BuiltinService` service. - -The complete `BuiltinService` is defined by Protobuf: - -```protobuf -service BuiltinService { - rpc Ping(Ping_Args) returns(Ping_Result); - rpc ListMethod(ListMethod_Args) returns(ListMethod_Result); -} - -message Ping_Args { - string value = 1; -} -message Ping_Result { - string value = 1; -} - -message ListMethod_Args { - // empty -} -message ListMethod_Result { - repeated string method_name_list = 1; -} -``` - -The `Ping` method can verify whether the service is normal, and the `ListMethod` method can query the list of all services and functions provided. - -### 3.3. `KclvmService` - -The `KclvmService` service is a service related to KCLVM functionality. The usage is the same as the `BuiltinService` service. - -For example, there is the following `Person` structure definition: - -```python -schema Person: - key: str - - check: - "value" in key # 'key' is required and 'key' must contain "value" -``` - -Then we want to use `Person` to verify the following JSON data: - -```json -{"key": "value"} -``` - -This can be done through the `ValidateCode` method of the `KclvmService` service. 
Refer to the `ValidateCode_Args` structure of the `ValidateCode` method: - -```protobuf -message ValidateCode_Args { - string data = 1; - string code = 2; - string schema = 3; - string attribute_name = 4; - string format = 5; -} -``` - -Construct the JSON data required by the POST request according to the `ValidateCode_Args` structure, which contains the `Person` definition and the JSON data to be verified: - -```json -{ - "code": "\nschema Person:\n key: str\n\n check:\n \"value\" in key # 'key' is required and 'key' must contain \"value\"\n", - "data": "{\"attr_name\": {\"key\": \"value\"}}" -} -``` - -Save this JSON data to the `vet-hello.json` file and verify it with the following command: - -```shell -$ curl -X POST \ - http://127.0.0.1:2021/api:protorpc/KclvmService.ValidateCode \ - -H "accept: application/json" \ - --data @./vet-hello.json -{ - "error": "", - "result": { - "success": true - } -} -``` - -## 4. APIs in other languages - -Coming soon diff --git a/docs/reference/lang/xlang-api/rest-api.md b/docs/reference/lang/xlang-api/rest-api.md deleted file mode 100644 index c84d69a4..00000000 --- a/docs/reference/lang/xlang-api/rest-api.md +++ /dev/null @@ -1,505 +0,0 @@ ---- -sidebar_position: 2 ---- -# Rest API - -## 1. Start REST Service - -The RestAPI service can be started in the following ways: - -``` -kcl-go rest-server -http=:2021 -``` - -The service can then be requested via the POST protocol: - -```shell -$ curl -X POST http://127.0.0.1:2021/api:protorpc/BuiltinService.Ping --data '{}' -{ - "error": "", - "result": {} -} -``` - -The POST request and the returned JSON data are consistent with the structure defined by Protobuf. - -## 2. `BuiltinService` - -Where the `/api:protorpc/BuiltinService.Ping` path represents the `Ping` method of the `BuiltinService` service. - -The complete `BuiltinService` is defined by Protobuf: - -```protobuf -service BuiltinService { - rpc Ping(Ping_Args) returns(Ping_Result); - rpc ListMethod(ListMethod_Args) returns(ListMethod_Result); -} - -message Ping_Args { - string value = 1; -} -message Ping_Result { - string value = 1; -} - -message ListMethod_Args { - // empty -} -message ListMethod_Result { - repeated string method_name_list = 1; -} -``` - -The `Ping` method can verify whether the service is normal, and the `ListMethod` method can query the list of all services and functions provided. - -## 3. `KclvmService` - -The `KclvmService` service is a service related to KCLVM functionality. The usage is the same as the `BuiltinService` service. - -For example, there is the following `Person` structure definition: - -```python -schema Person: - key: str - - check: - "value" in key # 'key' is required and 'key' must contain "value" -``` - -Then we want to use `Person` to verify the following JSON data: - -```json -{"key": "value"} -``` - -This can be done through the `ValidateCode` method of the `KclvmService` service. 
Refer to the `ValidateCode_Args` structure of the `ValidateCode` method: - -```protobuf -message ValidateCode_Args { - string data = 1; - string code = 2; - string schema = 3; - string attribute_name = 4; - string format = 5; -} -``` - -Construct the JSON data required by the POST request according to the `ValidateCode_Args` structure, which contains the `Person` definition and the JSON data to be verified: - -```json -{ - "code": "\nschema Person:\n key: str\n\n check:\n \"value\" in key # 'key' is required and 'key' must contain \"value\"\n", - "data": "{\"attr_name\": {\"key\": \"value\"}}" -} -``` - -Save this JSON data to the `vet-hello.json` file and verify it with the following command: - -```shell -$ curl -X POST \ - http://127.0.0.1:2021/api:protorpc/KclvmService.ValidateCode \ - -H "accept: application/json" \ - --data @./vet-hello.json -{ - "error": "", - "result": { - "success": true - } -} -``` - -## 4. Complete Protobuf Service Definition - -Cross-language APIs defined via Protobuf([https://github.com/KusionStack/kclvm-go/blob/main/pkg/spec/gpyrpc/gpyrpc.proto](https://github.com/KusionStack/kclvm-go/blob/main/pkg/spec/gpyrpc/gpyrpc.proto)): - -```protobuf -// Copyright 2021 The KCL Authors. All rights reserved. -// -// This file defines the request parameters and return structure of the KCLVM RPC server. -// We can use the following command to start a KCLVM RPC server. -// -// ``` -// kclvm -m kclvm.program.rpc-server -http=:2021 -// ``` -// -// The service can then be requested via the POST protocol: -// -// ``` -// $ curl -X POST http://127.0.0.1:2021/api:protorpc/BuiltinService.Ping --data '{}' -// { -// "error": "", -// "result": {} -// } -// ``` - -syntax = "proto3"; - -package gpyrpc; - -option go_package = "kusionstack.io/kclvm-go/pkg/spec/gpyrpc;gpyrpc"; - -import "google/protobuf/any.proto"; -import "google/protobuf/descriptor.proto"; - -// ---------------------------------------------------------------------------- - -// kcl main.k -D name=value -message CmdArgSpec { - string name = 1; - string value = 2; -} - -// kcl main.k -O pkgpath:path.to.field=field_value -message CmdOverrideSpec { - string pkgpath = 1; - string field_path = 2; - string field_value = 3; - string action = 4; -} - -// ---------------------------------------------------------------------------- -// gpyrpc request/response/error types -// ---------------------------------------------------------------------------- - -message RestResponse { - google.protobuf.Any result = 1; - string error = 2; - KclError kcl_err = 3; -} - -message KclError { - string ewcode = 1; // See kclvm/kcl/error/kcl_err_msg.py - string name = 2; - string msg = 3; - repeated KclErrorInfo error_infos = 4; -} - -message KclErrorInfo { - string err_level = 1; - string arg_msg = 2; - string filename = 3; - string src_code = 4; - string line_no = 5; - string col_no = 6; -} - -// ---------------------------------------------------------------------------- -// service requset/response -// ---------------------------------------------------------------------------- - -// gpyrpc.BuiltinService -service BuiltinService { - rpc Ping(Ping_Args) returns(Ping_Result); - rpc ListMethod(ListMethod_Args) returns(ListMethod_Result); -} - -// gpyrpc.KclvmService -service KclvmService { - rpc Ping(Ping_Args) returns(Ping_Result); - - rpc ParseFile_LarkTree(ParseFile_LarkTree_Args) returns(ParseFile_LarkTree_Result); - rpc ParseFile_AST(ParseFile_AST_Args) returns(ParseFile_AST_Result); - rpc ParseProgram_AST(ParseProgram_AST_Args) 
returns(ParseProgram_AST_Result); - - rpc ExecProgram(ExecProgram_Args) returns(ExecProgram_Result); - - rpc ResetPlugin(ResetPlugin_Args) returns(ResetPlugin_Result); - - rpc FormatCode(FormatCode_Args) returns(FormatCode_Result); - rpc FormatPath(FormatPath_Args) returns(FormatPath_Result); - rpc LintPath(LintPath_Args) returns(LintPath_Result); - rpc OverrideFile(OverrideFile_Args) returns (OverrideFile_Result); - - rpc EvalCode(EvalCode_Args) returns(EvalCode_Result); - rpc ResolveCode(ResolveCode_Args) returns(ResolveCode_Result); - rpc GetSchemaType(GetSchemaType_Args) returns(GetSchemaType_Result); - rpc ValidateCode(ValidateCode_Args) returns(ValidateCode_Result); - rpc SpliceCode(SpliceCode_Args) returns(SpliceCode_Result); - - rpc Complete(Complete_Args) returns(Complete_Result); - rpc GoToDef(GoToDef_Args) returns(GoToDef_Result); - rpc DocumentSymbol(DocumentSymbol_Args) returns(DocumentSymbol_Result); - rpc Hover(Hover_Args) returns(Hover_Result); - - rpc ListDepFiles(ListDepFiles_Args) returns(ListDepFiles_Result); - rpc LoadSettingsFiles(LoadSettingsFiles_Args) returns(LoadSettingsFiles_Result); -} - -message Ping_Args { - string value = 1; -} -message Ping_Result { - string value = 1; -} - -message ListMethod_Args { - // empty -} -message ListMethod_Result { - repeated string method_name_list = 1; -} - -message ParseFile_LarkTree_Args { - string filename = 1; - string source_code = 2; - bool ignore_file_line = 3; -} -message ParseFile_LarkTree_Result { - string lark_tree_json = 1; -} - -message ParseFile_AST_Args { - string filename = 1; - string source_code = 2; -} -message ParseFile_AST_Result { - string ast_json = 1; // json value -} - -message ParseProgram_AST_Args { - repeated string k_filename_list = 1; -} -message ParseProgram_AST_Result { - string ast_json = 1; // json value -} - -message ExecProgram_Args { - string work_dir = 1; - - repeated string k_filename_list = 2; - repeated string k_code_list = 3; - - repeated CmdArgSpec args = 4; - repeated CmdOverrideSpec overrides = 5; - - bool disable_yaml_result = 6; - - bool print_override_ast = 7; - - // -r --strict-range-check - bool strict_range_check = 8; - - // -n --disable-none - bool disable_none = 9; - // -v --verbose - int32 verbose = 10; - - // -d --debug - int32 debug = 11; -} -message ExecProgram_Result { - string json_result = 1; - string yaml_result = 2; - - string escaped_time = 101; -} - -message ResetPlugin_Args { - string plugin_root = 1; -} -message ResetPlugin_Result { - // empty -} - -message FormatCode_Args { - string source = 1; -} - -message FormatCode_Result { - bytes formatted = 1; -} - -message FormatPath_Args { - string path = 1; -} - -message FormatPath_Result { - repeated string changedPaths = 1; -} - -message LintPath_Args { - string path = 1; -} - -message LintPath_Result { - repeated string results = 1; -} - -message OverrideFile_Args { - string file = 1; - repeated string specs = 2; -} - -message OverrideFile_Result { - bool result = 1; -} - -message EvalCode_Args { - string code = 1; -} -message EvalCode_Result { - string json_result = 2; -} - -message ResolveCode_Args { - string code = 1; -} - -message ResolveCode_Result { - bool success = 1; -} - -message GetSchemaType_Args { - string file = 1; - string code = 2; - string schema_name = 3; // emtry is all -} -message GetSchemaType_Result { - repeated KclType schema_type_list = 1; -} - -message ValidateCode_Args { - string data = 1; - string code = 2; - string schema = 3; - string attribute_name = 4; - string format = 5; -} - -message 
ValidateCode_Result { - bool success = 1; - string err_message = 2; -} - -message CodeSnippet { - string schema = 1; - string rule = 2; -} - -message SpliceCode_Args { - repeated CodeSnippet codeSnippets = 1; -} - -message SpliceCode_Result { - string spliceCode = 1; -} - -message Position { - int64 line = 1; - int64 column = 2; - string filename = 3; -} - -message Complete_Args { - Position pos = 1; - string name = 2; - string code = 3; -} - -message Complete_Result { - string completeItems = 1; -} - -message GoToDef_Args { - Position pos = 1; - string code = 2; -} - -message GoToDef_Result { - string locations = 1; -} - -message DocumentSymbol_Args { - string file = 1; - string code = 2; -} - -message DocumentSymbol_Result { - string symbol = 1; -} - -message Hover_Args { - Position pos = 1; - string code = 2; -} - -message Hover_Result { - string hoverResult = 1; -} - -message ListDepFiles_Args { - string work_dir = 1; - bool use_abs_path = 2; - bool include_all = 3; - bool use_fast_parser = 4; -} - -message ListDepFiles_Result { - string pkgroot = 1; - string pkgpath = 2; - repeated string files = 3; -} - -// --------------------------------------------------------------------------------- -// LoadSettingsFiles API -// Input work dir and setting files and return the merged kcl singleton config. -// --------------------------------------------------------------------------------- - -message LoadSettingsFiles_Args { - string work_dir = 1; - repeated string files = 2; -} - -message LoadSettingsFiles_Result { - CliConfig kcl_cli_configs = 1; - repeated KeyValuePair kcl_options = 2; -} - -message CliConfig { - repeated string files = 1; - string output = 2; - repeated string overrides = 3; - repeated string path_selector = 4; - bool strict_range_check = 5; - bool disable_none = 6; - int64 verbose = 7; - bool debug = 8; -} - -message KeyValuePair { - string key = 1; - string value = 2; -} - -// ---------------------------------------------------------------------------- -// JSON Schema Lit -// ---------------------------------------------------------------------------- - -message KclType { - string type = 1; // schema, dict, list, str, int, float, bool, null, type_string - repeated KclType union_types = 2 ; // union types - string default = 3; // default value - - string schema_name = 4; // schema name - string schema_doc = 5; // schema doc - map properties = 6; // schema properties - repeated string required = 7; // required schema properties, [property_name1, property_name2] - - KclType key = 8; // dict key type - KclType item = 9; // dict/list item type - - int32 line = 10; - - repeated Decorator decorators = 11; // schema decorators -} - -message Decorator { - string name = 1; - repeated string arguments = 2; - map keywords = 3; -} - -// ---------------------------------------------------------------------------- -// END -// ---------------------------------------------------------------------------- -``` diff --git a/docs/reference/model/1-overview.md b/docs/reference/model/1-overview.md deleted file mode 100644 index c80baa1c..00000000 --- a/docs/reference/model/1-overview.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -id: overview -sidebar_label: Overview ---- -# Kusion Model Overview - -**Kusion 模型库**也叫做 `Kusion Model`,是 KusionStack 中预置的、使用 KCL 描述的配置模型,它提供给用户开箱即用、高度抽象的配置界面,模型库最初朴素的出发点就是改善 YAML 用户的效率和体验,我们希望通过将代码更繁杂的模型抽象封装到统一的模型中,从而简化用户侧配置代码的编写。 - -⚡️ **Kusion 模型库**由以下部分组成: - -- **核心模型库**: - - **前端模型**:前端模型即「用户界面」,包含平台侧暴露给用户的所有可配置属性,其中省略了一些重复的、可推导的配置,抽象出必要属性暴露给用户,具有用户友好的特性,比如 `server.k`。 - - 
**后端模型**:后端模型是「模型实现」,是让前端模型属性生效的模型,主要包含前端模型实例的渲染逻辑,后端模型中可借助 KCL 编写校验、逻辑判断、代码片段复用等代码,提高配置代码复用性和健壮性,对用户不感知,比如 `server_backend.k` -- **底层模型**:是不包含任何实现逻辑和抽象的模型,往往由工具转换生成,无需修改,和真正生效的 YAML 属性一一对应,底层模型需要经过进一步抽象,一般不直接被用户使用。比如,`kusion_kubernetes` 是 Kubernetes 场景的底层模型库; - -![](/img/docs/reference/model/kusion-model-01.png) diff --git a/docs/reference/model/2-concept.md b/docs/reference/model/2-concept.md deleted file mode 100644 index efed2823..00000000 --- a/docs/reference/model/2-concept.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -id: concept -sidebar_label: Concept ---- -# Kusion Model Concept - -## 1. Kusion 模型库 (Kusion Model) - -**Kusion 模型库**也叫做 `Kusion Model`,是 KusionStack 中预置的、使用 KCL 描述的配置模型,它提供给用户开箱即用、高度抽象的配置界面,模型库最初朴素的出发点就是改善 YAML 用户的效率和体验,我们希望通过将代码更繁杂的模型抽象封装到统一的模型中,从而简化用户侧配置代码的编写。 - -⚡️ **Kusion 模型库**由以下部分组成: - -- **核心模型库**: - - **前端模型**:前端模型即「用户界面」,包含平台侧暴露给用户的所有可配置属性,其中省略了一些重复的、可推导的配置,抽象出必要属性暴露给用户,具有用户友好的特性,比如 `server.k`。 - - **后端模型**:后端模型是「模型实现」,是让前端模型属性生效的模型,主要包含前端模型实例的渲染逻辑,后端模型中可借助 KCL 编写校验、逻辑判断、代码片段复用等代码,提高配置代码复用性和健壮性,对用户不感知,比如 `server_backend.k` -- **底层模型**:是不包含任何实现逻辑和抽象的模型,往往由工具转换生成,无需修改,和真正生效的 YAML 属性一一对应,底层模型需要经过进一步抽象,一般不直接被用户使用。比如,`kusion_kubernetes` 是 Kubernetes 场景的底层模型库; - -## 2. 前端模型 - -**前端模型** 即「用户界面」,包含平台侧暴露给用户的所有可配置属性,其中省略了一些重复的、可推导的配置,抽象出必要属性暴露给用户,具有用户友好的特性。用户只需要像实例化一个类(Class)一样,传入必要参数构成一份应用的「配置清单」,经过工具链编译即可得到完整的 Kubernetes Manifests,其中包含 Deployment、Service 等 Kubernetes 资源; -一个简单的前端模型样例如下: - -```bash -schema Server: - # Application workload type, default to 'Deployment' - workloadType: "Deployment" | "StatefulSet" = "Deployment" - - # Application replicas - replicas: int = option("replicas") or 1 - - # Main container image - image: str = option("image") - # Main container resource - schedulingStrategy: strategy.SchedulingStrategy = strategy.SchedulingStrategy{} - - # Main container configuration - mainContainer: container.Main - # Sidecar container configurations - sidecarContainers?: [s.Sidecar] - # Init container configurations - initContainers?: [s.Sidecar] - - # Workload configuration - labels?: {str:str} - annotations?: {str:str} - selector?: {str:str} - podMetadata?: apis.ObjectMeta - volumes?: [volume.Volume] - - # Other configurations - needNamespace?: bool = True - - configMaps?: [configmap.ConfigMap] - secrets?: [secret.Secret] - services?: [service.Service] -``` - -## 3. 后端模型 - -**后端模型** 是「模型实现」,是让前端模型属性生效的模型,主要包含前端模型实例的渲染逻辑,后端模型中可借助 KCL 编写校验、逻辑判断、代码片段复用等代码,提高配置代码复用性和健壮性,对用户不感知; -一个简单的后端模型定义如下: - -```bash -schema ServerBackend[inputData]: - mixin [ - ...... - ] - - # Validations - assert ac.__META_APP_NAME, "app name must be specified and can't be empty or None or Undefined" - ...... - - # Varaible - _workload_name: str = "{}{}".format(metadata.__META_APP_NAME, metadata.__META_ENV_TYPE_NAME).lower() - - # result - kubernetes: {str: []} { - Deployment = [ - v1alpha1.Deployment { - name = _cafedName - ...... - } - ] - } -``` - -## 4. 底层模型 - -**底层模型** 是不包含任何实现逻辑和抽象的模型,往往由工具转换生成,无需修改,和真正生效的 YAML 属性一一对应,底层模型需要经过进一步抽象,一般不直接被用户使用。比如,`kusion_kubernetes` 是 Kubernetes 场景的底层模型库; - -常用的底层模型库有: - -* Kubernetes 底层模型库(kusion_kubernetes) -* Prometheus 底层模型库(kusion_prometheus) - -## 5. Mixin - -**Mixin** 是 KCL 提供的一种复用代码片段的方式,后端模型只需要通过下述方式引用,就可以复用 Mixin 中的逻辑: - -```bash -import sigma.base.pkg.kusion_models.mixins - -schema ServerBackend[inputData]: - mixin [ - ...... - mixins.SchedulingStrategyMixin, - ...... - ] -``` - -## 6. FAQ - -### ❓ 为什么要区分前端模型和后端模型? 
- -区分前端模型和后端模型的直接目的是将「用户界面」和「模型实现」进行分离; diff --git a/docs/reference/model/core/1-server.md b/docs/reference/model/core/1-server.md deleted file mode 100644 index 6b5e83ec..00000000 --- a/docs/reference/model/core/1-server.md +++ /dev/null @@ -1,276 +0,0 @@ -# Server - -云原生应用运维模型(Server)严格来说属于 KusionStack 模型分层中的前端模型(Front-end Model),它被用来声明应用启动的参数配置,其中省略了启动一个云原生应用过程中一些重复的、可推导的配置,抽象出必要属性暴露给用户,具有用户友好的特性。 -用户只需要像实例化一个类(Class)一样,传入必要参数构成一份应用的「配置」,经过 KCL 编译即可得到完整的部署 YAML,其中包含的 Kubernetes 资源包含 Deployment、Service 等; - -## 1. 模型全景 -### 1.1 模型定义 - -:::note -**注意**:模型开放的属性遵守最小化原则 -::: - -请查阅 [Server](/docs/reference/model/kusion_models/kube/frontend/doc_server) 模型定义。 -### 1.2 最小示例 - -```python -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.container -import base.pkg.kusion_models.kube.templates.resource as res_tpl - -# The application configuration in stack will overwrite -# the configuration with the same attribute in base. -appConfiguration: frontend.Server { - image = "gcr.io/google-samples/gb-frontend:v4" - schedulingStrategy = { - # 调度策略,即资源要求 - resource = res_tpl.tiny - } - mainContainer = container.Main { - # 主容器名称 - name = "php-redis" - # 主容器环境变量 - env = [{name = "GET_HOSTS_FROM", value = "dns"}] - # 主容器端口 - ports = [{containerPort = 80}] - } -} -``` -上面的代码示例,定义了一个名为 `appConfiguration` 的对象,它是模型 `Server` 的实例。 -指定了 `image`、`schedulingStrategy` 和 `mainContainer` 三个字段, -并且后 2 个字段是其他 `schema` 的实例。 -这段代码就定义了一个 Kubernetes 的 Deployment 对象的最小属性集,即镜像、调度策略和主容器信息。 - -### 1.3 完整示例 - -```python -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.container -import base.pkg.kusion_models.kube.templates.resource as res_tpl -import base.pkg.kusion_models.kube.frontend.service - -# Application Configuration -appConfiguration: frontend.Server { - # Main Container Configuration - mainContainer: container.Main { - # 主容器名称 - name = "php-redis" - # 主容器环境变量 - env = [ - { - name = "GET_HOSTS_FROM" - value = "dns" - } - ] - # 主容器端口 - ports = [ - { - containerPort = 80 - } - ] - } - # Server 标签选择器 - selector = { - tier = "frontend" - } - # Pod 模版 - podMetadata = { - # Pod 标签 - labels = { - tier = "frontend" - } - } - # 调度策略 - schedulingStrategy = { - resource = res_tpl.medium - } - # 微服务 - services = [ - service.Service { - # 微服务名称 - name = "frontend-service" - # 微服务类型 - type = "NodePort" - # 微服务端口映射 - ports = [ - { - port = 80 - } - ] - } - ] - image = "gcr.io/google-samples/gb-frontend:v4" -} -``` -上面的代码在最小示例的基础上,添加了更多属性的声明。 -指定了 Deployment 及其衍生出的 Pod 之间的标签选择关系(selector/label), -还指定了访问 Pod 的微服务,类型是 `NodePort`,映射到容器的端口是 `80`。 - -## 2. 对容器的定义 - -### 2.1 模型定义 - -请查阅 [Container](/docs/reference/model/kusion_models/kube/frontend/container/doc_container) 模型定义。 -### 2.2 示例 - -```python -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.container -import base.pkg.kusion_models.kube.frontend.container.env as e -import base.pkg.kusion_models.kube.frontend.container.port as cp -import base.pkg.kusion_models.kube.frontend.container.probe as p - -# The application configuration in stack will overwrite -# the configuration with the same attribute in base. 
-appConfiguration: frontend.Server { - # Main container configuration - mainContainer = container.Main { - # 主容器名称,可选 - name = "main" - # 主容器启动命令,可选 - command = ["/home/admin/server.sh"] - # 主容器启动参数,可选 - args = ["start"] - # 主容器环境变量,可选 - env = [ - e.Env { - name = "app.version" - value = "v1.0.0" - } - ] - envFrom = [ - e.EnvFromSource { - configMapRef = "my-configmap" - } - ] - # 主容器端口,可选 - ports = [ - cp.ContainerPort { - containerPort = 12201 - protocol = "TCP" - } - ] - # 主容器存活探针,可选 - livenessProbe = p.Probe { - # 探活连续失败阈值 - failureThreshold = 3 - # 首次探活延迟 - initialDelaySeconds = 30 - # 探活间隔 - periodSeconds = 5 - # 探活连续成功阈值 - successThreshold = 1 - # 探活超时时间 - timeoutSeconds = 10 - # 探活操作 - handler = p.Exec { - command = ["/bin/sh", "-c", "echo livenessProbe"] - } - } - # 主容器就绪探针,可选 - readinessProbe = p.Probe { - failureThreshold = 3 - initialDelaySeconds = 30 - periodSeconds = 5 - successThreshold = 2 - timeoutSeconds = 10 - handler = p.Exec { - command = ["/bin/sh", "-c", "echo readinessProbe"] - } - } - # 主容器启动探针, 启动探针探测成功之后存活探测才开始工作,可选 - startupProbe = p.Probe { - failureThreshold = 3 - initialDelaySeconds = 30 - periodSeconds = 5 - successThreshold = 2 - timeoutSeconds = 10 - handler = p.Exec { - command = ["/bin/sh", "-c", "echo startupProbe"] - } - } - } -} -``` -上面的代码是把主容器的常用属性全都列了出来,可以指定主容器的名称、启动命令、静态环境变量、动态环境变量和探针。 -我们一般不会直接使用主容器,就像 Kubernetes 中除了静态 Pod,一般不会直接创建 Pod 一样。 - -## 3. 资源规格 - -### 3.1 模型定义 - -请查阅 [Resource](/reference/model/kusion_models/kube/frontend/resource/doc_resource.md) 模型定义。 -### 3.2 示例 - -```python -import base.pkg.kusion_models.kube.frontend.resource as res - -res = res.Resource { - # CPU - cpu = 2 - # 内存 - memory = 2048Mi - # 磁盘 - disk = 20Gi -} -``` -上面的代码定义了 `res` 变量,它是将 Kubernetes 的三种常用资源:CPU、内存和磁盘,抽象成 `Resource` 模型的一个对象。 - -## 4. 调度策略 - -### 4.1 模型定义 - -请查阅 [Scheduling Strategy](/docs/reference/model/kusion_models/kube/frontend/strategy/doc_scheduling_strategy) 模型定义。 - -### 4.2 示例 - -```python -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.resource as res - -appConfiguration: frontend.Server { - # 调度策略,即资源请求 - schedulingStrategy.resource = res.Resource { - cpu = 100m - memory = 100Mi - disk = 1Gi - } -} -``` -上面的代码是将 3.2 小节的资源规格对象赋值给了调度策略,明确了 `Server` 模型下发到集群后的资源调度请求。 - -## 5. Volume 挂载 - -对 Kubernetes 原生的 Volume 和 VolumeMount 进行了封装; - -### 5.1 模型定义 - -请查阅 [Volume](/docs/reference/model/kusion_models/kube/frontend/volume/doc_volume) 模型定义。 -### 5.2 示例 - -```python -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.volume as v - -# The application configuration in stack will overwrite -# the configuration with the same attribute in base. 
-appConfiguration: frontend.Server { - # 卷定义 - volumes = [ - v.Volume { - # 卷名称 - name = "log-volume" - # 卷类型 - volumeSource = v.EmptyDir{} - # 挂载点 - mounts = [ - v.Mount{ - container = "main" - path = "/home/admin/logs" - } - ] - } - ] -} -``` -上面的代码定义了一个 `EmptyDir` 卷,在 `appConfiguration` 中指定了挂载的容器和挂载路径。 \ No newline at end of file diff --git a/docs/reference/model/core/_category_.json b/docs/reference/model/core/_category_.json deleted file mode 100644 index f65a3f99..00000000 --- a/docs/reference/model/core/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Core Model Library", - "position": 2 -} diff --git a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_mutating_webhook.md b/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_mutating_webhook.md deleted file mode 100644 index 707cc03d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_mutating_webhook.md +++ /dev/null @@ -1,26 +0,0 @@ -# mutating_webhook - -Source: [base/pkg/kusion_kubernetes/api/admissionregistration/v1/mutating_webhook.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/admissionregistration/v1/mutating_webhook.k) - -This is the mutating\_webhook module in kusion\_kubernetes.api.admissionregistration.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema MutatingWebhook - -MutatingWebhook describes an admission webhook and the resources and operations it applies to. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**admissionReviewVersions**
AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.
failurePolicy : str, default is Undefined, optional
FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.
matchPolicy : str, default is Undefined, optional
matchPolicy defines how the "rules" list is used to match incoming requests. Allowed values are "Exact" or "Equivalent".|[str]|Undefined|**required**| -|**failurePolicy**|str|Undefined|optional| -|**matchPolicy**|str|Undefined|optional| -|**name**|str|Undefined|**required**| -|**reinvocationPolicy**|str|Undefined|optional| -|**rules**|[[v1.RuleWithOperations](doc_rule_with_operations#schema-rulewithoperations)]|Undefined|optional| -|**sideEffects**|str|Undefined|**required**| -|**timeoutSeconds**|int|Undefined|optional| -|**clientConfig**|[WebhookClientConfig](doc_webhook_client_config#schema-webhookclientconfig)|Undefined|**required**| -|**namespaceSelector**|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| -|**objectSelector**|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_rule_with_operations.md b/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_rule_with_operations.md deleted file mode 100644 index 937349bf..00000000 --- a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_rule_with_operations.md +++ /dev/null @@ -1,20 +0,0 @@ -# rule_with_operations - -Source: [base/pkg/kusion_kubernetes/api/admissionregistration/v1/rule_with_operations.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/admissionregistration/v1/rule_with_operations.k) - -This is the rule\_with\_operations module in kusion\_kubernetes.api.admissionregistration.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema RuleWithOperations - -RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiGroups**
APIGroups is the API groups the resources belong to. '\*' is all groups. If '\*' is present, the length of the slice must be one. Required.
apiVersions : [str], default is Undefined, optional
APIVersions is the API versions the resources belong to. '\*' is all versions. If '\*' is present, the length of the slice must be one. Required.
operations : [str], default is Undefined, optional
Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or \* for all of those operations and any future admission operations that are added. If '\*' is present, the length of the slice must be one. Required.
resources : [str], default is Undefined, optional
Resources is a list of resources this rule applies to.|[str]|Undefined|optional| -|**apiVersions**|[str]|Undefined|optional| -|**operations**|[str]|Undefined|optional| -|**resources**|[str]|Undefined|optional| -|**scope**|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_service_reference.md b/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_service_reference.md deleted file mode 100644 index 1fb844ed..00000000 --- a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_service_reference.md +++ /dev/null @@ -1,19 +0,0 @@ -# service_reference - -Source: [base/pkg/kusion_kubernetes/api/admissionregistration/v1/service_reference.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/admissionregistration/v1/service_reference.k) - -This is the service\_reference module in kusion\_kubernetes.api.admissionregistration.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ServiceReference - -ServiceReference holds a reference to Service.legacy.k8s.io - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
`name` is the name of the service. Required|str|Undefined|**required**| -|**namespace**
`namespace` is the namespace of the service. Required|str|Undefined|**required**| -|**path**
`path` is an optional URL path which will be sent in any request to this service.|str|Undefined|optional| -|**port**
If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).|int|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_validating_webhook.md b/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_validating_webhook.md deleted file mode 100644 index 29d8e26c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_validating_webhook.md +++ /dev/null @@ -1,25 +0,0 @@ -# validating_webhook - -Source: [base/pkg/kusion_kubernetes/api/admissionregistration/v1/validating_webhook.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/admissionregistration/v1/validating_webhook.k) - -This is the validating\_webhook module in kusion\_kubernetes.api.admissionregistration.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ValidatingWebhook - -ValidatingWebhook describes an admission webhook and the resources and operations it applies to. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**admissionReviewVersions**
AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.
failurePolicy : str, default is Undefined, optional
FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.
matchPolicy : str, default is Undefined, optional
matchPolicy defines how the "rules" list is used to match incoming requests. Allowed values are "Exact" or "Equivalent".|[str]|Undefined|**required**| -|**failurePolicy**|str|Undefined|optional| -|**matchPolicy**|str|Undefined|optional| -|**name**|str|Undefined|**required**| -|**rules**|[[v1.RuleWithOperations](doc_rule_with_operations#schema-rulewithoperations)]|Undefined|optional| -|**sideEffects**|str|Undefined|**required**| -|**timeoutSeconds**|int|Undefined|optional| -|**clientConfig**|[WebhookClientConfig](doc_webhook_client_config#schema-webhookclientconfig)|Undefined|**required**| -|**namespaceSelector**|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| -|**objectSelector**|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_webhook_client_config.md b/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_webhook_client_config.md deleted file mode 100644 index f76a8760..00000000 --- a/docs/reference/model/kusion_kubernetes/api/admissionregistration/v1/doc_webhook_client_config.md +++ /dev/null @@ -1,18 +0,0 @@ -# webhook_client_config - -Source: [base/pkg/kusion_kubernetes/api/admissionregistration/v1/webhook_client_config.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/admissionregistration/v1/webhook_client_config.k) - -This is the webhook\_client\_config module in kusion\_kubernetes.api.admissionregistration.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema WebhookClientConfig - -WebhookClientConfig contains the information to make a TLS connection with the webhook - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**caBundle**
`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.
url : str, default is Undefined, optional
`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.|str|Undefined|optional| -|**url**|str|Undefined|optional| -|**service**|[ServiceReference](doc_service_reference#schema-servicereference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set.md deleted file mode 100644 index 3ca1f951..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set.md +++ /dev/null @@ -1,19 +0,0 @@ -# daemon_set - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/daemon_set.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/daemon_set.k) - -This is the daemon\_set module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DaemonSet - -DaemonSet represents the configuration of a daemon set. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"apps/v1"|"apps/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"DaemonSet"|"DaemonSet"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[DaemonSetSpec](doc_daemon_set_spec#schema-daemonsetspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set_spec.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set_spec.md deleted file mode 100644 index 163fc273..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set_spec.md +++ /dev/null @@ -1,20 +0,0 @@ -# daemon_set_spec - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/daemon_set_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/daemon_set_spec.k) - -This is the daemon\_set\_spec module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DaemonSetSpec - -DaemonSetSpec is the specification of a daemon set. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**minReadySeconds**
The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).|int|Undefined|optional| -|**revisionHistoryLimit**
The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.|int|Undefined|optional| -|**selector**
A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\#label-selectors|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|**required**| -|**template**
An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller\#pod-template|[v1.PodTemplateSpec](../../core/v1/doc_pod_template_spec#schema-podtemplatespec)|Undefined|**required**| -|**updateStrategy**
An update strategy to replace existing DaemonSet pods with new pods.|[DaemonSetUpdateStrategy](doc_daemon_set_update_strategy#schema-daemonsetupdatestrategy)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set_update_strategy.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set_update_strategy.md deleted file mode 100644 index d4e5ee9a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_daemon_set_update_strategy.md +++ /dev/null @@ -1,17 +0,0 @@ -# daemon_set_update_strategy - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/daemon_set_update_strategy.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/daemon_set_update_strategy.k) - -This is the daemon\_set\_update\_strategy module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DaemonSetUpdateStrategy - -DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**type**
Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.|str|Undefined|optional| -|**rollingUpdate**
Rolling update config params. Present only if type = "RollingUpdate".|[RollingUpdateDaemonSet](doc_rolling_update_daemon_set#schema-rollingupdatedaemonset)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment.md deleted file mode 100644 index 721a5882..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment.md +++ /dev/null @@ -1,19 +0,0 @@ -# deployment - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/deployment.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/deployment.k) - -This is the deployment module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Deployment - -Deployment enables declarative updates for Pods and ReplicaSets. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"apps/v1"|"apps/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Deployment"|"Deployment"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Specification of the desired behavior of the Deployment.|[DeploymentSpec](doc_deployment_spec#schema-deploymentspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment_spec.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment_spec.md deleted file mode 100644 index 7e5bb369..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment_spec.md +++ /dev/null @@ -1,23 +0,0 @@ -# deployment_spec - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/deployment_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/deployment_spec.k) - -This is the deployment\_spec module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DeploymentSpec - -DeploymentSpec is the specification of the desired behavior of the Deployment. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**minReadySeconds**
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)|int|Undefined|optional| -|**paused**
Indicates that the deployment is paused.|bool|Undefined|optional| -|**progressDeadlineSeconds**
The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.|int|Undefined|optional| -|**replicas**
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.|int|Undefined|optional| -|**revisionHistoryLimit**
The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.|int|Undefined|optional| -|**selector**
Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|**required**| -|**strategy**
The deployment strategy to use to replace existing pods with new ones.|[DeploymentStrategy](doc_deployment_strategy#schema-deploymentstrategy)|Undefined|optional| -|**template**
Template describes the pods that will be created.|[v1.PodTemplateSpec](../../core/v1/doc_pod_template_spec#schema-podtemplatespec)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment_strategy.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment_strategy.md deleted file mode 100644 index c2c5962d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_deployment_strategy.md +++ /dev/null @@ -1,17 +0,0 @@ -# deployment_strategy - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/deployment_strategy.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/deployment_strategy.k) - -This is the deployment\_strategy module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DeploymentStrategy - -DeploymentStrategy describes how to replace existing pods with new ones. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**type**
Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.|str|Undefined|optional| -|**rollingUpdate**
Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.|[RollingUpdateDeployment](doc_rolling_update_deployment#schema-rollingupdatedeployment)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_daemon_set.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_daemon_set.md deleted file mode 100644 index a91151b6..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_daemon_set.md +++ /dev/null @@ -1,17 +0,0 @@ -# rolling_update_daemon_set - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/rolling_update_daemon_set.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/rolling_update_daemon_set.k) - -This is the rolling\_update\_daemon\_set module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema RollingUpdateDaemonSet - -Spec to control the desired behavior of daemon set rolling update. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**maxSurge**
The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.|int \| str|Undefined|optional| -|**maxUnavailable**
The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.|int \| str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_deployment.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_deployment.md deleted file mode 100644 index c70c499f..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_deployment.md +++ /dev/null @@ -1,17 +0,0 @@ -# rolling_update_deployment - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/rolling_update_deployment.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/rolling_update_deployment.k) - -This is the rolling\_update\_deployment module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema RollingUpdateDeployment - -Spec to control the desired behavior of rolling update. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**maxSurge**
The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.|int \| str|Undefined|optional| -|**maxUnavailable**
The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.|int \| str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_stateful_set_strategy.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_stateful_set_strategy.md deleted file mode 100644 index 7b025e4a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_rolling_update_stateful_set_strategy.md +++ /dev/null @@ -1,16 +0,0 @@ -# rolling_update_stateful_set_strategy - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/rolling_update_stateful_set_strategy.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/rolling_update_stateful_set_strategy.k) - -This is the rolling\_update\_stateful\_set\_strategy module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema RollingUpdateStatefulSetStrategy - -RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**partition**
Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.|int|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set.md deleted file mode 100644 index 56d3e5a6..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set.md +++ /dev/null @@ -1,19 +0,0 @@ -# stateful_set - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/stateful_set.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/stateful_set.k) - -This is the stateful\_set module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema StatefulSet - -StatefulSet represents a set of pods with consistent identities. Identities are defined as:
- Network: A single stable DNS and hostname.
- Storage: As many VolumeClaims as requested.The StatefulSet guarantees that a given network identity will always map to the same storage identity. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources
kind : "StatefulSet", default is "StatefulSet", required
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds
metadata : apis.ObjectMeta, default is Undefined, optional
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata
spec : StatefulSetSpec, default is Undefined, optional
Spec defines the desired identities of pods in this set.|"apps/v1"|"apps/v1"|**required**| -|**kind**|"StatefulSet"|Undefined|**required**| -|**metadata**|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**|[StatefulSetSpec](doc_stateful_set_spec#schema-statefulsetspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set_spec.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set_spec.md deleted file mode 100644 index 05d2456c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set_spec.md +++ /dev/null @@ -1,24 +0,0 @@ -# stateful_set_spec - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/stateful_set_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/stateful_set_spec.k) - -This is the stateful\_set\_spec module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema StatefulSetSpec - -A StatefulSetSpec is the specification of a StatefulSet. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**minReadySeconds**
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.|int|Undefined|optional| -|**podManagementPolicy**
podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.|str|Undefined|optional| -|**replicas**
replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.|int|Undefined|optional| -|**revisionHistoryLimit**
revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.|int|Undefined|optional| -|**serviceName**
serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller.|str|Undefined|**required**| -|**volumeClaimTemplates**
volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.|[[v1.PersistentVolumeClaim](../../core/v1/doc_persistent_volume_claim#schema-persistentvolumeclaim)]|Undefined|optional| -|**selector**
selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\#label-selectors|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|**required**| -|**template**
template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.|[v1.PodTemplateSpec](../../core/v1/doc_pod_template_spec#schema-podtemplatespec)|Undefined|**required**| -|**updateStrategy**
updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.|[StatefulSetUpdateStrategy](doc_stateful_set_update_strategy#schema-statefulsetupdatestrategy)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set_update_strategy.md b/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set_update_strategy.md deleted file mode 100644 index 4d7bb2ec..00000000 --- a/docs/reference/model/kusion_kubernetes/api/apps/v1/doc_stateful_set_update_strategy.md +++ /dev/null @@ -1,17 +0,0 @@ -# stateful_set_update_strategy - -Source: [base/pkg/kusion_kubernetes/api/apps/v1/stateful_set_update_strategy.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/stateful_set_update_strategy.k) - -This is the stateful\_set\_update\_strategy module in kusion\_kubernetes.api.apps.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema StatefulSetUpdateStrategy - -StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**type**
Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.|str|Undefined|optional| -|**rollingUpdate**
RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.|[RollingUpdateStatefulSetStrategy](doc_rolling_update_stateful_set_strategy#schema-rollingupdatestatefulsetstrategy)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_cross_version_object_reference.md b/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_cross_version_object_reference.md deleted file mode 100644 index 9c530817..00000000 --- a/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_cross_version_object_reference.md +++ /dev/null @@ -1,18 +0,0 @@ -# cross_version_object_reference - -Source: [base/pkg/kusion_kubernetes/api/autoscaling/v1/cross_version_object_reference.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/autoscaling/v1/cross_version_object_reference.k) - -This is the cross\_version\_object\_reference module in kusion\_kubernetes.api.autoscaling.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema CrossVersionObjectReference - -CrossVersionObjectReference contains enough information to let you identify the referred resource. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
API version of the referent|str|Undefined|optional| -|**kind**
Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds"|str|Undefined|**required**| -|**name**
Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers\#names|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_horizontal_pod_autoscaler.md b/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_horizontal_pod_autoscaler.md deleted file mode 100644 index af7c1c5c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_horizontal_pod_autoscaler.md +++ /dev/null @@ -1,19 +0,0 @@ -# horizontal_pod_autoscaler - -Source: [base/pkg/kusion_kubernetes/api/autoscaling/v1/horizontal_pod_autoscaler.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/autoscaling/v1/horizontal_pod_autoscaler.k) - -This is the horizontal\_pod\_autoscaler module in kusion\_kubernetes.api.autoscaling.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HorizontalPodAutoscaler - -configuration of a horizontal pod autoscaler. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"autoscaling/v1"|"autoscaling/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"HorizontalPodAutoscaler"|"HorizontalPodAutoscaler"|**required**| -|**metadata**
Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status.|[HorizontalPodAutoscalerSpec](doc_horizontal_pod_autoscaler_spec#schema-horizontalpodautoscalerspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_horizontal_pod_autoscaler_spec.md b/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_horizontal_pod_autoscaler_spec.md deleted file mode 100644 index c2632619..00000000 --- a/docs/reference/model/kusion_kubernetes/api/autoscaling/v1/doc_horizontal_pod_autoscaler_spec.md +++ /dev/null @@ -1,19 +0,0 @@ -# horizontal_pod_autoscaler_spec - -Source: [base/pkg/kusion_kubernetes/api/autoscaling/v1/horizontal_pod_autoscaler_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/autoscaling/v1/horizontal_pod_autoscaler_spec.k) - -This is the horizontal\_pod\_autoscaler\_spec module in kusion\_kubernetes.api.autoscaling.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HorizontalPodAutoscalerSpec - -specification of a horizontal pod autoscaler. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**maxReplicas**
upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.|int|Undefined|**required**| -|**minReplicas**
minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.|int|Undefined|optional| -|**targetCPUUtilizationPercentage**
target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.|int|Undefined|optional| -|**scaleTargetRef**
reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.|[CrossVersionObjectReference](doc_cross_version_object_reference#schema-crossversionobjectreference)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/batch/v1/doc_job.md b/docs/reference/model/kusion_kubernetes/api/batch/v1/doc_job.md deleted file mode 100644 index 085b67c8..00000000 --- a/docs/reference/model/kusion_kubernetes/api/batch/v1/doc_job.md +++ /dev/null @@ -1,19 +0,0 @@ -# job - -Source: [base/pkg/kusion_kubernetes/api/batch/v1/job.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/batch/v1/job.k) - -This is the job module in kusion\_kubernetes.api.batch.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Job - -Job represents the configuration of a single job. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"batch/v1"|"batch/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Job"|"Job"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[JobSpec](doc_job_spec#schema-jobspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/batch/v1/doc_job_spec.md b/docs/reference/model/kusion_kubernetes/api/batch/v1/doc_job_spec.md deleted file mode 100644 index 55e70447..00000000 --- a/docs/reference/model/kusion_kubernetes/api/batch/v1/doc_job_spec.md +++ /dev/null @@ -1,25 +0,0 @@ -# job_spec - -Source: [base/pkg/kusion_kubernetes/api/batch/v1/job_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/batch/v1/job_spec.k) - -This is the job\_spec module in kusion\_kubernetes.api.batch.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema JobSpec - -JobSpec describes how the job execution will look like. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**activeDeadlineSeconds**
Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.|int|Undefined|optional| -|**backoffLimit**
Specifies the number of retries before marking this job failed. Defaults to 6|int|Undefined|optional| -|**completionMode**
CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.|str|Undefined|optional| -|**completions**|int|Undefined|optional| -|**manualSelector**|bool|Undefined|optional| -|**parallelism**|int|Undefined|optional| -|**suspend**|bool|Undefined|optional| -|**ttlSecondsAfterFinished**|int|Undefined|optional| -|**selector**|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| -|**template**|[v1.PodTemplateSpec](../../core/v1/doc_pod_template_spec#schema-podtemplatespec)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_cron_job.md b/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_cron_job.md deleted file mode 100644 index 1d6a22e9..00000000 --- a/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_cron_job.md +++ /dev/null @@ -1,19 +0,0 @@ -# cron_job - -Source: [base/pkg/kusion_kubernetes/api/batch/v1beta1/cron_job.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/batch/v1beta1/cron_job.k) - -This is the cron\_job module in kusion\_kubernetes.api.batch.v1beta1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema CronJob - -CronJob represents the configuration of a single cron job. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"batch/v1beta1"|"batch/v1beta1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"CronJob"|"CronJob"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[CronJobSpec](doc_cron_job_spec#schema-cronjobspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_cron_job_spec.md b/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_cron_job_spec.md deleted file mode 100644 index 25ebb84f..00000000 --- a/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_cron_job_spec.md +++ /dev/null @@ -1,22 +0,0 @@ -# cron_job_spec - -Source: [base/pkg/kusion_kubernetes/api/batch/v1beta1/cron_job_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/batch/v1beta1/cron_job_spec.k) - -This is the cron\_job\_spec module in kusion\_kubernetes.api.batch.v1beta1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema CronJobSpec - -CronJobSpec describes how the job execution will look like and when it will actually run. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**concurrencyPolicy**
Specifies how to treat concurrent executions of a Job. Valid values are: - "Allow" (default): allows CronJobs to run concurrently; - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - "Replace": cancels currently running job and replaces it with a new one|str|Undefined|optional| -|**failedJobsHistoryLimit**
The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.|int|Undefined|optional| -|**schedule**
The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.|str|Undefined|**required**| -|**startingDeadlineSeconds**
Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed job executions will be counted as failed ones.|int|Undefined|optional| -|**successfulJobsHistoryLimit**
The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.|int|Undefined|optional| -|**suspend**
This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.|bool|Undefined|optional| -|**jobTemplate**
Specifies the job that will be created when executing a CronJob.|[JobTemplateSpec](doc_job_template_spec#schema-jobtemplatespec)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_job_template_spec.md b/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_job_template_spec.md deleted file mode 100644 index 421c9530..00000000 --- a/docs/reference/model/kusion_kubernetes/api/batch/v1beta1/doc_job_template_spec.md +++ /dev/null @@ -1,17 +0,0 @@ -# job_template_spec - -Source: [base/pkg/kusion_kubernetes/api/batch/v1beta1/job_template_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/batch/v1beta1/job_template_spec.k) - -This is the job\_template\_spec module in kusion\_kubernetes.api.batch.v1beta1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema JobTemplateSpec - -JobTemplateSpec describes the data a Job should have when created from a template - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**metadata**
Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[v1.JobSpec](../v1/doc_job_spec#schema-jobspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_affinity.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_affinity.md deleted file mode 100644 index c9157463..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_affinity.md +++ /dev/null @@ -1,18 +0,0 @@ -# affinity - -Source: [base/pkg/kusion_kubernetes/api/core/v1/affinity.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/affinity.k) - -This is the affinity module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Affinity - -Affinity is a group of affinity scheduling rules. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**nodeAffinity**
Describes node affinity scheduling rules for the pod.|[NodeAffinity](doc_node_affinity#schema-nodeaffinity)|Undefined|optional| -|**podAffinity**
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).|[PodAffinity](doc_pod_affinity#schema-podaffinity)|Undefined|optional| -|**podAntiAffinity**
Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).|[PodAntiAffinity](doc_pod_anti_affinity#schema-podantiaffinity)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_aws_elastic_block_store_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_aws_elastic_block_store_volume_source.md deleted file mode 100644 index 38ef4ef7..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_aws_elastic_block_store_volume_source.md +++ /dev/null @@ -1,19 +0,0 @@ -# aws_elastic_block_store_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/aws_elastic_block_store_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/aws_elastic_block_store_volume_source.k) - -This is the aws\_elastic\_block\_store\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema AWSElasticBlockStoreVolumeSource - -Represents a Persistent Disk resource in AWS.
An AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes\#awselasticblockstore|str|Undefined|optional| -|**partition**
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).|int|Undefined|optional| -|**readOnly**
Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes\#awselasticblockstore|bool|Undefined|optional| -|**volumeID**
Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes\#awselasticblockstore|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_azure_disk_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_azure_disk_volume_source.md deleted file mode 100644 index e3e0a29c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_azure_disk_volume_source.md +++ /dev/null @@ -1,21 +0,0 @@ -# azure_disk_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/azure_disk_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/azure_disk_volume_source.k) - -This is the azure\_disk\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema AzureDiskVolumeSource - -AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**cachingMode**
Host Caching mode: None, Read Only, Read Write.|str|Undefined|optional| -|**diskName**
The Name of the data disk in the blob storage|str|Undefined|**required**| -|**diskURI**
The URI of the data disk in the blob storage|str|Undefined|**required**| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.|str|Undefined|optional| -|**kind**
Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared|str|Undefined|optional| -|**readOnly**
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_azure_file_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_azure_file_volume_source.md deleted file mode 100644 index cdb629ae..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_azure_file_volume_source.md +++ /dev/null @@ -1,18 +0,0 @@ -# azure_file_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/azure_file_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/azure_file_volume_source.k) - -This is the azure\_file\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema AzureFileVolumeSource - -AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**readOnly**
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|Undefined|optional| -|**secretName**
The name of the secret that contains the Azure Storage Account Name and Key|str|Undefined|**required**| -|**shareName**
Share Name|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_capabilities.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_capabilities.md deleted file mode 100644 index cb96ca1f..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_capabilities.md +++ /dev/null @@ -1,17 +0,0 @@ -# capabilities - -Source: [base/pkg/kusion_kubernetes/api/core/v1/capabilities.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/capabilities.k) - -This is the capabilities module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Capabilities - -Adds and removes POSIX capabilities from running containers. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**add**
Added capabilities|[str]|Undefined|optional| -|**drop**
Removed capabilities|[str]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ceph_fs_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ceph_fs_volume_source.md deleted file mode 100644 index 25272f16..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ceph_fs_volume_source.md +++ /dev/null @@ -1,21 +0,0 @@ -# ceph_fs_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/ceph_fs_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/ceph_fs_volume_source.k) - -This is the ceph\_fs\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema CephFSVolumeSource - -Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**monitors**
Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md\#how-to-use-it|[str]|Undefined|**required**| -|**path**
Optional: Used as the mounted root, rather than the full Ceph tree, default is /|str|Undefined|optional| -|**readOnly**
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md\#how-to-use-it|bool|Undefined|optional| -|**secretFile**
Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md\#how-to-use-it|str|Undefined|optional| -|**user**
Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md\#how-to-use-it|str|Undefined|optional| -|**secretRef**
Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md\#how-to-use-it|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_cinder_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_cinder_volume_source.md deleted file mode 100644 index a9d2465b..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_cinder_volume_source.md +++ /dev/null @@ -1,19 +0,0 @@ -# cinder_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/cinder_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/cinder_volume_source.k) - -This is the cinder\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema CinderVolumeSource - -Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md|str|Undefined|optional| -|**readOnly**
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md|bool|Undefined|optional| -|**volumeID**
volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md|str|Undefined|**required**| -|**secretRef**
Optional: points to a secret object containing parameters used to connect to OpenStack.|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_client_ip_config.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_client_ip_config.md deleted file mode 100644 index 64f4a983..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_client_ip_config.md +++ /dev/null @@ -1,16 +0,0 @@ -# client_ip_config - -Source: [base/pkg/kusion_kubernetes/api/core/v1/client_ip_config.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/client_ip_config.k) - -This is the client\_ip\_config module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ClientIPConfig - -ClientIPConfig represents the configurations of Client IP based session affinity. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**timeoutSeconds**
timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be \>0 && \<=86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800(for 3 hours).|int|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map.md deleted file mode 100644 index d0fe545b..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map.md +++ /dev/null @@ -1,21 +0,0 @@ -# config_map - -Source: [base/pkg/kusion_kubernetes/api/core/v1/config_map.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/config_map.k) - -This is the config\_map module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ConfigMap - -ConfigMap holds configuration data for pods to consume. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"v1"|"v1"|**required**| -|**binaryData**
BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '\_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.|{str: str}|Undefined|optional| -|**data**
Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '\_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.|{str: str}|Undefined|optional| -|**immutable**
Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.|bool|Undefined|optional| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"ConfigMap"|"ConfigMap"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_env_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_env_source.md deleted file mode 100644 index 78e7bf33..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_env_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# config_map_env_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/config_map_env_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/config_map_env_source.k) - -This is the config\_map\_env\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ConfigMapEnvSource - -ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.
The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|optional| -|**optional**
Specify whether the ConfigMap must be defined|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_key_selector.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_key_selector.md deleted file mode 100644 index 83eb1caf..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_key_selector.md +++ /dev/null @@ -1,18 +0,0 @@ -# config_map_key_selector - -Source: [base/pkg/kusion_kubernetes/api/core/v1/config_map_key_selector.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/config_map_key_selector.k) - -This is the config\_map\_key\_selector module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ConfigMapKeySelector - -Selects a key from a ConfigMap. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**key**
The key to select.|str|Undefined|**required**| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|optional| -|**optional**
Specify whether the ConfigMap or its key must be defined|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_node_config_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_node_config_source.md deleted file mode 100644 index 3cdec19f..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_node_config_source.md +++ /dev/null @@ -1,20 +0,0 @@ -# config_map_node_config_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/config_map_node_config_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/config_map_node_config_source.k) - -This is the config\_map\_node\_config\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ConfigMapNodeConfigSource - -ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**kubeletConfigKey**
KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.|str|Undefined|**required**| -|**name**
Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.|str|Undefined|**required**| -|**namespace**
Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.|str|Undefined|**required**| -|**resourceVersion**
ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.|str|Undefined|optional| -|**uid**
UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_projection.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_projection.md deleted file mode 100644 index b04445d1..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_projection.md +++ /dev/null @@ -1,18 +0,0 @@ -# config_map_projection - -Source: [base/pkg/kusion_kubernetes/api/core/v1/config_map_projection.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/config_map_projection.k) - -This is the config\_map\_projection module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ConfigMapProjection - -Adapts a ConfigMap into a projected volume.
The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**items**
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.|[[v1.KeyToPath](doc_key_to_path#schema-keytopath)]|Undefined|optional| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|optional| -|**optional**
Specify whether the ConfigMap or its keys must be defined|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_volume_source.md deleted file mode 100644 index 6320f7be..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_config_map_volume_source.md +++ /dev/null @@ -1,19 +0,0 @@ -# config_map_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/config_map_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/config_map_volume_source.k) - -This is the config\_map\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ConfigMapVolumeSource - -Adapts a ConfigMap into a volume.
The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**defaultMode**
Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.|int|Undefined|optional| -|**items**
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.|[[v1.KeyToPath](doc_key_to_path#schema-keytopath)]|Undefined|optional| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|optional| -|**optional**
Specify whether the ConfigMap or its keys must be defined|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_container.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_container.md deleted file mode 100644 index 189c8e2d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_container.md +++ /dev/null @@ -1,37 +0,0 @@ -# container - -Source: [base/pkg/kusion_kubernetes/api/core/v1/container.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/container.k) - -This is the container module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Container - -A single application container that you want to run within a pod. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**args**
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR\_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR\_NAME) syntax: i.e. "$$(VAR\_NAME)" will produce the string literal "$(VAR\_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/\#running-a-command-in-a-shell|[str]|Undefined|optional| -|**command**
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR\_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR\_NAME) syntax: i.e. "$$(VAR\_NAME)" will produce the string literal "$(VAR\_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/\#running-a-command-in-a-shell|[str]|Undefined|optional| -|**env**
List of environment variables to set in the container. Cannot be updated.|[[v1.EnvVar](doc_env_var#schema-envvar)]|Undefined|optional| -|**envFrom**
List of sources to populate environment variables in the container. The keys defined within a source must be a C\_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.|[[v1.EnvFromSource](doc_env_from_source#schema-envfromsource)]|Undefined|optional| -|**image**
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.|str|Undefined|optional| -|**imagePullPolicy**
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images\#updating-images|str|Undefined|optional| -|**name**
Name of the container specified as a DNS\_LABEL. Each container in a pod must have a unique name (DNS\_LABEL). Cannot be updated.|str|Undefined|**required**| -|**ports**
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.|[[v1.ContainerPort](doc_container_port#schema-containerport)]|Undefined|optional| -|**stdin**
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.|bool|Undefined|optional| -|**stdinOnce**
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.|bool|Undefined|optional| -|**terminationMessagePath**
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.|str|Undefined|optional| -|**terminationMessagePolicy**
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.|str|Undefined|optional| -|**tty**
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.|bool|Undefined|optional| -|**volumeDevices**
volumeDevices is the list of block devices to be used by the container.|[[v1.VolumeDevice](doc_volume_device#schema-volumedevice)]|Undefined|optional| -|**volumeMounts**
Pod volumes to mount into the container's filesystem. Cannot be updated.|[[v1.VolumeMount](doc_volume_mount#schema-volumemount)]|Undefined|optional| -|**workingDir**
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.|str|Undefined|optional| -|**lifecycle**
Actions that the management system should take in response to container lifecycle events. Cannot be updated.|[Lifecycle](doc_lifecycle#schema-lifecycle)|Undefined|optional| -|**livenessProbe**
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|[Probe](doc_probe#schema-probe)|Undefined|optional| -|**readinessProbe**
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|[Probe](doc_probe#schema-probe)|Undefined|optional| -|**resources**
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/|[ResourceRequirements](doc_resource_requirements#schema-resourcerequirements)|Undefined|optional| -|**securityContext**
SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/|[SecurityContext](doc_security_context#schema-securitycontext)|Undefined|optional| -|**startupProbe**
StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|[Probe](doc_probe#schema-probe)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_container_port.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_container_port.md deleted file mode 100644 index da1d6f08..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_container_port.md +++ /dev/null @@ -1,20 +0,0 @@ -# container_port - -Source: [base/pkg/kusion_kubernetes/api/core/v1/container_port.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/container_port.k) - -This is the container\_port module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ContainerPort - -ContainerPort represents a network port in a single container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**containerPort**
Number of port to expose on the pod's IP address. This must be a valid port number, 0 \< x \< 65536.|int|Undefined|**required**| -|**hostIP**
What host IP to bind the external port to.|str|Undefined|optional| -|**hostPort**
Number of port to expose on the host. If specified, this must be a valid port number, 0 \< x \< 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.|int|Undefined|optional| -|**name**
If specified, this must be an IANA\_SVC\_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.|str|Undefined|optional| -|**protocol**
Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_csi_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_csi_volume_source.md deleted file mode 100644 index a085b0a6..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_csi_volume_source.md +++ /dev/null @@ -1,20 +0,0 @@ -# csi_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/csi_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/csi_volume_source.k) - -This is the csi\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema CSIVolumeSource - -Represents a source location of a volume to mount, managed by an external CSI driver - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**driver**
Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.|str|Undefined|**required**| -|**fsType**
Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.|str|Undefined|optional| -|**readOnly**
Specifies a read-only configuration for the volume. Defaults to false (read/write).|bool|Undefined|optional| -|**volumeAttributes**
VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.|{str: str}|Undefined|optional| -|**nodePublishSecretRef**
NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_projection.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_projection.md deleted file mode 100644 index 671bba6d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_projection.md +++ /dev/null @@ -1,16 +0,0 @@ -# downward_api_projection - -Source: [base/pkg/kusion_kubernetes/api/core/v1/downward_api_projection.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/downward_api_projection.k) - -This is the downward\_api\_projection module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DownwardAPIProjection - -Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**items**
Items is a list of DownwardAPIVolume file|[[v1.DownwardAPIVolumeFile](doc_downward_api_volume_file#schema-downwardapivolumefile)]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_volume_file.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_volume_file.md deleted file mode 100644 index edcadd98..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_volume_file.md +++ /dev/null @@ -1,19 +0,0 @@ -# downward_api_volume_file - -Source: [base/pkg/kusion_kubernetes/api/core/v1/downward_api_volume_file.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/downward_api_volume_file.k) - -This is the downward\_api\_volume\_file module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DownwardAPIVolumeFile - -DownwardAPIVolumeFile represents information to create the file containing the pod field - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**mode**
Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.|int|Undefined|optional| -|**path**
Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'|str|Undefined|**required**| -|**fieldRef**
Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.|[ObjectFieldSelector](doc_object_field_selector#schema-objectfieldselector)|Undefined|optional| -|**resourceFieldRef**
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.|[ResourceFieldSelector](doc_resource_field_selector#schema-resourcefieldselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_volume_source.md deleted file mode 100644 index 58cf1143..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_downward_api_volume_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# downward_api_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/downward_api_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/downward_api_volume_source.k) - -This is the downward\_api\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema DownwardAPIVolumeSource - -DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**defaultMode**
Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.|int|Undefined|optional| -|**items**
Items is a list of downward API volume file|[[v1.DownwardAPIVolumeFile](doc_downward_api_volume_file#schema-downwardapivolumefile)]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_empty_dir_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_empty_dir_volume_source.md deleted file mode 100644 index df9a7cd1..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_empty_dir_volume_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# empty_dir_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/empty_dir_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/empty_dir_volume_source.k) - -This is the empty\_dir\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema EmptyDirVolumeSource - -Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**medium**
What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes\#emptydir|str|Undefined|optional| -|**sizeLimit**
Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes\#emptydir|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_from_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_from_source.md deleted file mode 100644 index 4264f926..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_from_source.md +++ /dev/null @@ -1,18 +0,0 @@ -# env_from_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/env_from_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/env_from_source.k) - -This is the env\_from\_source module in kusion\_kubernetes.api.core.v1 package.
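To make the EmptyDirVolumeSource reference above concrete, here is a minimal KCL sketch of how the schema might be instantiated. The import path and alias are assumptions based on the konfig source links in this diff, not something the diff itself defines.

```
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

# Hypothetical example: a memory-backed emptyDir capped at 128Mi.
# Both attributes are optional; omitting `medium` falls back to the node's default medium.
cache = corev1.EmptyDirVolumeSource {
    medium = "Memory"
    sizeLimit = "128Mi"
}
```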
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema EnvFromSource - -EnvFromSource represents the source of a set of ConfigMaps - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**prefix**
An optional identifier to prepend to each key in the ConfigMap. Must be a C\_IDENTIFIER.|str|Undefined|optional| -|**configMapRef**
The ConfigMap to select from|[ConfigMapEnvSource](doc_config_map_env_source#schema-configmapenvsource)|Undefined|optional| -|**secretRef**
The Secret to select from|[SecretEnvSource](doc_secret_env_source#schema-secretenvsource)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_var.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_var.md deleted file mode 100644 index 6a99e817..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_var.md +++ /dev/null @@ -1,18 +0,0 @@ -# env_var - -Source: [base/pkg/kusion_kubernetes/api/core/v1/env_var.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/env_var.k) - -This is the env\_var module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema EnvVar - -EnvVar represents an environment variable present in a Container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Name of the environment variable. Must be a C\_IDENTIFIER.|str|Undefined|**required**| -|**value**
Variable references $(VAR\_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR\_NAME) syntax: i.e. "$$(VAR\_NAME)" will produce the string literal "$(VAR\_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".|str|Undefined|optional| -|**valueFrom**
Source for the environment variable's value. Cannot be used if value is not empty.|[EnvVarSource](doc_env_var_source#schema-envvarsource)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_var_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_var_source.md deleted file mode 100644 index 6c39b99a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_env_var_source.md +++ /dev/null @@ -1,19 +0,0 @@ -# env_var_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/env_var_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/env_var_source.k) - -This is the env\_var\_source module in kusion\_kubernetes.api.core.v1 package.
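As a rough illustration of the EnvVar schema documented above, a hedged KCL sketch (same assumed import path as the other examples in this section):

```
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

# `name` is the only required attribute. A literal `value` is set here;
# `valueFrom` would be used instead when the value comes from another source.
logLevel = corev1.EnvVar {
    name = "LOG_LEVEL"
    value = "info"
}
```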
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema EnvVarSource - -EnvVarSource represents a source for the value of an EnvVar. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**configMapKeyRef**
Selects a key of a ConfigMap.|[ConfigMapKeySelector](doc_config_map_key_selector#schema-configmapkeyselector)|Undefined|optional| -|**fieldRef**
Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.|[ObjectFieldSelector](doc_object_field_selector#schema-objectfieldselector)|Undefined|optional| -|**resourceFieldRef**
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.|[ResourceFieldSelector](doc_resource_field_selector#schema-resourcefieldselector)|Undefined|optional| -|**secretKeyRef**
Selects a key of a secret in the pod's namespace|[SecretKeySelector](doc_secret_key_selector#schema-secretkeyselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ephemeral_container.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ephemeral_container.md deleted file mode 100644 index 4bd6efed..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ephemeral_container.md +++ /dev/null @@ -1,38 +0,0 @@ -# ephemeral_container - -Source: [base/pkg/kusion_kubernetes/api/core/v1/ephemeral_container.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/ephemeral_container.k) - -This is the ephemeral\_container module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema EphemeralContainer - -An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**args**
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR\_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR\_NAME) syntax: i.e. "$$(VAR\_NAME)" will produce the string literal "$(VAR\_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/\#running-a-command-in-a-shell|[str]|Undefined|optional| -|**command**
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR\_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR\_NAME) syntax: i.e. "$$(VAR\_NAME)" will produce the string literal "$(VAR\_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/\#running-a-command-in-a-shell|[str]|Undefined|optional| -|**env**
List of environment variables to set in the container. Cannot be updated.|[[v1.EnvVar](doc_env_var#schema-envvar)]|Undefined|optional| -|**envFrom**
List of sources to populate environment variables in the container. The keys defined within a source must be a C\_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.|[[v1.EnvFromSource](doc_env_from_source#schema-envfromsource)]|Undefined|optional| -|**image**
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|optional| -|**imagePullPolicy**
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images\#updating-images|str|Undefined|optional| -|**name**
Name of the ephemeral container specified as a DNS\_LABEL. This name must be unique among all containers, init containers and ephemeral containers.|str|Undefined|**required**| -|**ports**
Ports are not allowed for ephemeral containers.|[[v1.ContainerPort](doc_container_port#schema-containerport)]|Undefined|optional| -|**stdin**
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.|bool|Undefined|optional| -|**stdinOnce**
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.|bool|Undefined|optional| -|**targetContainerName**
If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.|str|Undefined|optional| -|**terminationMessagePath**
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.|str|Undefined|optional| -|**terminationMessagePolicy**
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.|str|Undefined|optional| -|**tty**
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.|bool|Undefined|optional| -|**volumeDevices**
volumeDevices is the list of block devices to be used by the container.|[[v1.VolumeDevice](doc_volume_device#schema-volumedevice)]|Undefined|optional| -|**volumeMounts**
Pod volumes to mount into the container's filesystem. Cannot be updated.|[[v1.VolumeMount](doc_volume_mount#schema-volumemount)]|Undefined|optional| -|**workingDir**
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.|str|Undefined|optional| -|**lifecycle**
Lifecycle is not allowed for ephemeral containers.|[Lifecycle](doc_lifecycle#schema-lifecycle)|Undefined|optional| -|**livenessProbe**
Probes are not allowed for ephemeral containers.|[Probe](doc_probe#schema-probe)|Undefined|optional| -|**readinessProbe**
Probes are not allowed for ephemeral containers.|[Probe](doc_probe#schema-probe)|Undefined|optional| -|**resources**
Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.|[ResourceRequirements](doc_resource_requirements#schema-resourcerequirements)|Undefined|optional| -|**securityContext**
Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.|[SecurityContext](doc_security_context#schema-securitycontext)|Undefined|optional| -|**startupProbe**
Probes are not allowed for ephemeral containers.|[Probe](doc_probe#schema-probe)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ephemeral_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ephemeral_volume_source.md deleted file mode 100644 index 1d730d65..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_ephemeral_volume_source.md +++ /dev/null @@ -1,16 +0,0 @@ -# ephemeral_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/ephemeral_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/ephemeral_volume_source.k) - -This is the ephemeral\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema EphemeralVolumeSource - -Represents an ephemeral volume that is handled by a normal storage driver. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**volumeClaimTemplate**
Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).|[PersistentVolumeClaimTemplate](doc_persistent_volume_claim_template#schema-persistentvolumeclaimtemplate)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_exec_action.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_exec_action.md deleted file mode 100644 index 4ee9d7c9..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_exec_action.md +++ /dev/null @@ -1,16 +0,0 @@ -# exec_action - -Source: [base/pkg/kusion_kubernetes/api/core/v1/exec_action.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/exec_action.k) - -This is the exec\_action module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ExecAction - -ExecAction describes a "run in container" action. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**command**
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.|[str]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_fc_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_fc_volume_source.md deleted file mode 100644 index ffa00769..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_fc_volume_source.md +++ /dev/null @@ -1,20 +0,0 @@ -# fc_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/fc_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/fc_volume_source.k) - -This is the fc\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema FCVolumeSource - -Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.|str|Undefined|optional| -|**lun**
Optional: FC target lun number|int|Undefined|optional| -|**readOnly**
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|Undefined|optional| -|**targetWWNs**
Optional: FC target worldwide names (WWNs)|[str]|Undefined|optional| -|**wwids**
Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.|[str]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_flex_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_flex_volume_source.md deleted file mode 100644 index 6e9e6d6d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_flex_volume_source.md +++ /dev/null @@ -1,20 +0,0 @@ -# flex_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/flex_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/flex_volume_source.k) - -This is the flex\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema FlexVolumeSource - -FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**driver**
Driver is the name of the driver to use for this volume.|str|Undefined|**required**| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.|str|Undefined|optional| -|**options**
Optional: Extra command options if any.|{str: str}|Undefined|optional| -|**readOnly**
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|Undefined|optional| -|**secretRef**
Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_flocker_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_flocker_volume_source.md deleted file mode 100644 index b9a591df..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_flocker_volume_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# flocker_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/flocker_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/flocker_volume_source.k) - -This is the flocker\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema FlockerVolumeSource - -Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**datasetName**
Name of the dataset stored as metadata -\> name on the dataset for Flocker; should be considered as deprecated|str|Undefined|optional| -|**datasetUUID**
UUID of the dataset. This is unique identifier of a Flocker dataset|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_gce_persistent_disk_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_gce_persistent_disk_volume_source.md deleted file mode 100644 index 1daeceee..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_gce_persistent_disk_volume_source.md +++ /dev/null @@ -1,19 +0,0 @@ -# gce_persistent_disk_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/gce_persistent_disk_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/gce_persistent_disk_volume_source.k) - -This is the gce\_persistent\_disk\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema GCEPersistentDiskVolumeSource - -Represents a Persistent Disk resource in Google Compute Engine.
A GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes\#gcepersistentdisk|str|Undefined|optional| -|**partition**
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes\#gcepersistentdisk|int|Undefined|optional| -|**pdName**
Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes\#gcepersistentdisk|str|Undefined|**required**| -|**readOnly**
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes\#gcepersistentdisk|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_git_repo_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_git_repo_volume_source.md deleted file mode 100644 index 06ee9235..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_git_repo_volume_source.md +++ /dev/null @@ -1,18 +0,0 @@ -# git_repo_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/git_repo_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/git_repo_volume_source.k) - -This is the git\_repo\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema GitRepoVolumeSource - -Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.
DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**directory**
Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.|str|Undefined|optional| -|**repository**
Repository URL|str|Undefined|**required**| -|**revision**
Commit hash for the specified revision.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_glusterfs_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_glusterfs_volume_source.md deleted file mode 100644 index 44e9b02e..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_glusterfs_volume_source.md +++ /dev/null @@ -1,18 +0,0 @@ -# glusterfs_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/glusterfs_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/glusterfs_volume_source.k) - -This is the glusterfs\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema GlusterfsVolumeSource - -Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**endpoints**
EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md\#create-a-pod|str|Undefined|**required**| -|**path**
Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md\#create-a-pod|str|Undefined|**required**| -|**readOnly**
ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md\#create-a-pod|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_handler.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_handler.md deleted file mode 100644 index c82990b6..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_handler.md +++ /dev/null @@ -1,18 +0,0 @@ -# handler - -Source: [base/pkg/kusion_kubernetes/api/core/v1/handler.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/handler.k) - -This is the handler module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Handler - -Handler defines a specific action that should be taken - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**exec**
One and only one of the following should be specified. Exec specifies the action to take.|[ExecAction](doc_exec_action#schema-execaction)|Undefined|optional| -|**httpGet**
HTTPGet specifies the http request to perform.|[HTTPGetAction](doc_http_get_action#schema-httpgetaction)|Undefined|optional| -|**tcpSocket**
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported|[TCPSocketAction](doc_tcp_socket_action#schema-tcpsocketaction)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_host_alias.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_host_alias.md deleted file mode 100644 index 11649c89..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_host_alias.md +++ /dev/null @@ -1,17 +0,0 @@ -# host_alias - -Source: [base/pkg/kusion_kubernetes/api/core/v1/host_alias.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/host_alias.k) - -This is the host\_alias module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HostAlias - -HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**hostnames**
Hostnames for the above IP address.|[str]|Undefined|optional| -|**ip**
IP address of the host file entry.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_host_path_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_host_path_volume_source.md deleted file mode 100644 index 599f04e5..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_host_path_volume_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# host_path_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/host_path_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/host_path_volume_source.k) - -This is the host\_path\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HostPathVolumeSource - -Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**path**
Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes\#hostpath|str|Undefined|**required**| -|**type**
Type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes\#hostpath|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_http_get_action.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_http_get_action.md deleted file mode 100644 index aa947a4e..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_http_get_action.md +++ /dev/null @@ -1,20 +0,0 @@ -# http_get_action - -Source: [base/pkg/kusion_kubernetes/api/core/v1/http_get_action.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/http_get_action.k) - -This is the http\_get\_action module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HTTPGetAction - -HTTPGetAction describes an action based on HTTP Get requests. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**host**
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.|str|Undefined|optional| -|**httpHeaders**
Custom headers to set in the request. HTTP allows repeated headers.|[[v1.HTTPHeader](doc_http_header#schema-httpheader)]|Undefined|optional| -|**path**
Path to access on the HTTP server.|str|Undefined|optional| -|**port**
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA\_SVC\_NAME.|int \| str|Undefined|**required**| -|**scheme**
Scheme to use for connecting to the host. Defaults to HTTP.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_http_header.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_http_header.md deleted file mode 100644 index fd1bfcc1..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_http_header.md +++ /dev/null @@ -1,17 +0,0 @@ -# http_header - -Source: [base/pkg/kusion_kubernetes/api/core/v1/http_header.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/http_header.k) - -This is the http\_header module in kusion\_kubernetes.api.core.v1 package.
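A minimal KCL sketch of the HTTPGetAction schema above, assuming the same import layout as the other examples in this section:

```
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

# `port` is required and may be an int or an IANA_SVC_NAME string.
healthCheck = corev1.HTTPGetAction {
    path = "/healthz"
    port = 8080
    scheme = "HTTP"
    httpHeaders = [corev1.HTTPHeader {
        name = "X-Probe"
        value = "liveness"
    }]
}
```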
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HTTPHeader - -HTTPHeader describes a custom header to be used in HTTP probes - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
The header field name|str|Undefined|**required**| -|**value**
The header field value|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_iscsi_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_iscsi_volume_source.md deleted file mode 100644 index 2d9b4cc5..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_iscsi_volume_source.md +++ /dev/null @@ -1,26 +0,0 @@ -# iscsi_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/iscsi_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/iscsi_volume_source.k) - -This is the iscsi\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ISCSIVolumeSource - -Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**chapAuthDiscovery**
whether support iSCSI Discovery CHAP authentication|bool|Undefined|optional| -|**chapAuthSession**
whether support iSCSI Session CHAP authentication|bool|Undefined|optional| -|**fsType**
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes\#iscsi|str|Undefined|optional| -|**initiatorName**
Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface `<target portal>:<volume name>` will be created for the connection.|str|Undefined|optional| -|**iqn**
Target iSCSI Qualified Name.|str|Undefined|**required**| -|**iscsiInterface**
iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).|str|Undefined|optional| -|**lun**
iSCSI Target Lun number.|int|Undefined|**required**| -|**portals**
iSCSI Target Portal List. The portal is either an IP or ip\_addr:port if the port is other than default (typically TCP ports 860 and 3260).|[str]|Undefined|optional| -|**readOnly**
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.|bool|Undefined|optional| -|**targetPortal**
iSCSI Target Portal. The Portal is either an IP or ip\_addr:port if the port is other than default (typically TCP ports 860 and 3260).|str|Undefined|**required**| -|**secretRef**
CHAP Secret for iSCSI target and initiator authentication|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_key_to_path.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_key_to_path.md deleted file mode 100644 index 09c1371e..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_key_to_path.md +++ /dev/null @@ -1,18 +0,0 @@ -# key_to_path - -Source: [base/pkg/kusion_kubernetes/api/core/v1/key_to_path.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/key_to_path.k) - -This is the key\_to\_path module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema KeyToPath - -Maps a string key to a path within a volume. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**key**
The key to project.|str|Undefined|**required**| -|**mode**
Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.|int|Undefined|optional| -|**path**
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_lifecycle.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_lifecycle.md deleted file mode 100644 index 5c1962d4..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_lifecycle.md +++ /dev/null @@ -1,17 +0,0 @@ -# lifecycle - -Source: [base/pkg/kusion_kubernetes/api/core/v1/lifecycle.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/lifecycle.k) - -This is the lifecycle module in kusion\_kubernetes.api.core.v1 package.
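For orientation, a hedged KCL sketch of the KeyToPath schema above (import path assumed from the konfig source links, not defined by this diff):

```
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

# Maps the ConfigMap/Secret key "config.yaml" to a relative path inside the volume.
# 420 is the decimal form of octal mode 0644, per the `mode` description above.
configItem = corev1.KeyToPath {
    key = "config.yaml"
    path = "app/config.yaml"
    mode = 420
}
```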
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Lifecycle - -Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**postStart**
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[Handler](doc_handler#schema-handler)|Undefined|optional| -|**preStop**
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[Handler](doc_handler#schema-handler)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_local_object_reference.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_local_object_reference.md deleted file mode 100644 index 6aa1f34d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_local_object_reference.md +++ /dev/null @@ -1,16 +0,0 @@ -# local_object_reference - -Source: [base/pkg/kusion_kubernetes/api/core/v1/local_object_reference.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/local_object_reference.k) - -This is the local\_object\_reference module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema LocalObjectReference - -LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_namespace.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_namespace.md deleted file mode 100644 index dfdd353d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_namespace.md +++ /dev/null @@ -1,19 +0,0 @@ -# namespace - -Source: [base/pkg/kusion_kubernetes/api/core/v1/namespace.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/namespace.k) - -This is the namespace module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Namespace - -Namespace provides a scope for Names. Use of multiple namespaces is optional. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"v1"|"v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Namespace"|"Namespace"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[NamespaceSpec](doc_namespace_spec#schema-namespacespec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_namespace_spec.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_namespace_spec.md deleted file mode 100644 index 86a81c22..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_namespace_spec.md +++ /dev/null @@ -1,16 +0,0 @@ -# namespace_spec - -Source: [base/pkg/kusion_kubernetes/api/core/v1/namespace_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/namespace_spec.k) - -This is the namespace\_spec module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NamespaceSpec - -NamespaceSpec describes the attributes on a Namespace. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**finalizers**
Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/|[str]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_nfs_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_nfs_volume_source.md deleted file mode 100644 index fa20e02a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_nfs_volume_source.md +++ /dev/null @@ -1,18 +0,0 @@ -# nfs_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/nfs_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/nfs_volume_source.k) - -This is the nfs\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NFSVolumeSource - -Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**path**
Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes\#nfs|str|Undefined|**required**| -|**readOnly**
ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes\#nfs|bool|Undefined|optional| -|**server**
Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes\#nfs|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node.md deleted file mode 100644 index 0c5656c1..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node.md +++ /dev/null @@ -1,19 +0,0 @@ -# node - -Source: [base/pkg/kusion_kubernetes/api/core/v1/node.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/node.k) - -This is the node module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Node - -Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd). - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"v1"|"v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Node"|"Node"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[NodeSpec](doc_node_spec#schema-nodespec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_affinity.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_affinity.md deleted file mode 100644 index fcb5523c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_affinity.md +++ /dev/null @@ -1,17 +0,0 @@ -# node_affinity - -Source: [base/pkg/kusion_kubernetes/api/core/v1/node_affinity.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/node_affinity.k) - -This is the node\_affinity module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NodeAffinity - -Node affinity is a group of node affinity scheduling rules. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**preferredDuringSchedulingIgnoredDuringExecution**
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.|[[v1.PreferredSchedulingTerm](doc_preferred_scheduling_term#schema-preferredschedulingterm)]|Undefined|optional| -|**requiredDuringSchedulingIgnoredDuringExecution**
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.|[NodeSelector](doc_node_selector#schema-nodeselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_config_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_config_source.md deleted file mode 100644 index 810f8cd4..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_config_source.md +++ /dev/null @@ -1,16 +0,0 @@ -# node_config_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/node_config_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/node_config_source.k) - -This is the node\_config\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NodeConfigSource - -NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22 - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**configMap**
ConfigMap is a reference to a Node's ConfigMap|[ConfigMapNodeConfigSource](doc_config_map_node_config_source#schema-configmapnodeconfigsource)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector.md deleted file mode 100644 index bf2d147d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector.md +++ /dev/null @@ -1,16 +0,0 @@ -# node_selector - -Source: [base/pkg/kusion_kubernetes/api/core/v1/node_selector.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/node_selector.k) - -This is the node\_selector module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NodeSelector - -A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**nodeSelectorTerms**
Required. A list of node selector terms. The terms are ORed.|[[v1.NodeSelectorTerm](doc_node_selector_term#schema-nodeselectorterm)]|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector_requirement.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector_requirement.md deleted file mode 100644 index 5ed16d55..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector_requirement.md +++ /dev/null @@ -1,18 +0,0 @@ -# node_selector_requirement - -Source: [base/pkg/kusion_kubernetes/api/core/v1/node_selector_requirement.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/node_selector_requirement.k) - -This is the node\_selector\_requirement module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NodeSelectorRequirement - -A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**key**
The label key that the selector applies to.|str|Undefined|**required**| -|**operator**
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.|str|Undefined|**required**| -|**values**
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.|[str]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector_term.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector_term.md deleted file mode 100644 index ec13e17a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_selector_term.md +++ /dev/null @@ -1,17 +0,0 @@ -# node_selector_term - -Source: [base/pkg/kusion_kubernetes/api/core/v1/node_selector_term.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/node_selector_term.k) - -This is the node\_selector\_term module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NodeSelectorTerm - -A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**matchExpressions**
A list of node selector requirements by node's labels.|[[v1.NodeSelectorRequirement](doc_node_selector_requirement#schema-nodeselectorrequirement)]|Undefined|optional| -|**matchFields**
A list of node selector requirements by node's fields.|[[v1.NodeSelectorRequirement](doc_node_selector_requirement#schema-nodeselectorrequirement)]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_spec.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_spec.md deleted file mode 100644 index 309988de..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_node_spec.md +++ /dev/null @@ -1,22 +0,0 @@ -# node_spec - -Source: [base/pkg/kusion_kubernetes/api/core/v1/node_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/node_spec.k) - -This is the node\_spec module in kusion\_kubernetes.api.core.v1 package.
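For orientation, here is a minimal KCL sketch of how the NodeAffinity, NodeSelector, NodeSelectorTerm, and NodeSelectorRequirement schemas above compose. The import path/alias and the label key and values are illustrative assumptions, not taken from these reference pages:

```python
# Illustrative sketch only: the import path and label values are assumptions.
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

# Require scheduling onto amd64 or arm64 nodes.
nodeAffinity = corev1.NodeAffinity {
    requiredDuringSchedulingIgnoredDuringExecution = corev1.NodeSelector {
        nodeSelectorTerms = [
            corev1.NodeSelectorTerm {
                matchExpressions = [
                    corev1.NodeSelectorRequirement {
                        key = "kubernetes.io/arch"
                        operator = "In"
                        values = ["amd64", "arm64"]
                    }
                ]
            }
        ]
    }
}
```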
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema NodeSpec - -NodeSpec describes the attributes that a node is created with. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**externalID**
Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966|str|Undefined|optional| -|**podCIDR**
PodCIDR represents the pod IP range assigned to the node.|str|Undefined|optional| -|**podCIDRs**
podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.|[str]|Undefined|optional| -|**providerID**
ID of the node assigned by the cloud provider in the format: \<ProviderName\>://\<ProviderSpecificNodeID\>|str|Undefined|optional| -|**taints**
If specified, the node's taints.|[[v1.Taint](doc_taint#schema-taint)]|Undefined|optional| -|**unschedulable**
Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/\#manual-node-administration|bool|Undefined|optional| -|**configSource**
Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration|[NodeConfigSource](doc_node_config_source#schema-nodeconfigsource)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_object_field_selector.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_object_field_selector.md deleted file mode 100644 index 7599b3d8..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_object_field_selector.md +++ /dev/null @@ -1,17 +0,0 @@ -# object_field_selector - -Source: [base/pkg/kusion_kubernetes/api/core/v1/object_field_selector.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/object_field_selector.k) - -This is the object\_field\_selector module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ObjectFieldSelector - -ObjectFieldSelector selects an APIVersioned field of an object. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
Version of the schema the FieldPath is written in terms of, defaults to "v1".|str|Undefined|optional| -|**fieldPath**
Path of the field to select in the specified API version.|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_object_reference.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_object_reference.md deleted file mode 100644 index 5249d1ae..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_object_reference.md +++ /dev/null @@ -1,22 +0,0 @@ -# object_reference - -Source: [base/pkg/kusion_kubernetes/api/core/v1/object_reference.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/object_reference.k) - -This is the object\_reference module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ObjectReference - -ObjectReference contains enough information to let you inspect or modify the referred object. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
API version of the referent.|str|Undefined|optional| -|**fieldPath**
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.|str|Undefined|optional| -|**kind**
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|str|Undefined|optional| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|optional| -|**namespace**
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/|str|Undefined|optional| -|**resourceVersion**
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#concurrency-control-and-consistency|str|Undefined|optional| -|**uid**
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#uids|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim.md deleted file mode 100644 index ea96773a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim.md +++ /dev/null @@ -1,19 +0,0 @@ -# persistent_volume_claim - -Source: [base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim.k) - -This is the persistent\_volume\_claim module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PersistentVolumeClaim - -PersistentVolumeClaim is a user's request for and claim to a persistent volume - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"v1"|"v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"PersistentVolumeClaim"|"PersistentVolumeClaim"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes\#persistentvolumeclaims|[PersistentVolumeClaimSpec](doc_persistent_volume_claim_spec#schema-persistentvolumeclaimspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_spec.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_spec.md deleted file mode 100644 index 04c21703..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_spec.md +++ /dev/null @@ -1,23 +0,0 @@ -# persistent_volume_claim_spec - -Source: [base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim_spec.k) - -This is the persistent\_volume\_claim\_spec module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PersistentVolumeClaimSpec - -PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**accessModes**
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes\#access-modes-1|[str]|Undefined|optional|
-|**storageClassName**
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes\#class-1|str|Undefined|optional|
-|**volumeMode**
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.|str|Undefined|optional|
-|**volumeName**
VolumeName is the binding reference to the PersistentVolume backing this claim.|str|Undefined|optional|
-|**dataSource**
This field can be used to specify either: \* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) \* An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.|[TypedLocalObjectReference](doc_typed_local_object_reference#schema-typedlocalobjectreference)|Undefined|optional|
-|**dataSourceRef**
Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: \* While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects.|[TypedLocalObjectReference](doc_typed_local_object_reference#schema-typedlocalobjectreference)|Undefined|optional|
-|**resources**|[ResourceRequirements](doc_resource_requirements#schema-resourcerequirements)|Undefined|optional| -|**selector**|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_template.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_template.md deleted file mode 100644 index 879a64df..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_template.md +++ /dev/null @@ -1,17 +0,0 @@ -# persistent_volume_claim_template - -Source: [base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim_template.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim_template.k) - -This is the persistent\_volume\_claim\_template module in kusion\_kubernetes.api.core.v1 package.
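As a quick sketch of how the PersistentVolumeClaim and PersistentVolumeClaimSpec schemas above fit together in KCL, with the import path, claim name, storage class and size all being illustrative assumptions:

```python
# Illustrative sketch only: the import path, names and sizes are assumptions.
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

claim = corev1.PersistentVolumeClaim {
    metadata.name = "data"
    spec = corev1.PersistentVolumeClaimSpec {
        accessModes = ["ReadWriteOnce"]
        storageClassName = "standard"
        # resources.requests is assumed to be a {str:str} map in this model.
        resources.requests = {"storage": "10Gi"}
    }
}
```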
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PersistentVolumeClaimTemplate - -PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**metadata**
May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.|[PersistentVolumeClaimSpec](doc_persistent_volume_claim_spec#schema-persistentvolumeclaimspec)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_volume_source.md deleted file mode 100644 index ddbddb3c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_persistent_volume_claim_volume_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# persistent_volume_claim_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/persistent_volume_claim_volume_source.k) - -This is the persistent\_volume\_claim\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PersistentVolumeClaimVolumeSource - -PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system). - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**claimName**
ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes\#persistentvolumeclaims|str|Undefined|**required**| -|**readOnly**
Will force the ReadOnly setting in VolumeMounts. Default false.|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_photon_persistent_disk_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_photon_persistent_disk_volume_source.md deleted file mode 100644 index e459836f..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_photon_persistent_disk_volume_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# photon_persistent_disk_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/photon_persistent_disk_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/photon_persistent_disk_volume_source.k) - -This is the photon\_persistent\_disk\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PhotonPersistentDiskVolumeSource - -Represents a Photon Controller persistent disk resource. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.|str|Undefined|optional| -|**pdID**
ID that identifies Photon Controller persistent disk|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_affinity.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_affinity.md deleted file mode 100644 index b08fe69e..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_affinity.md +++ /dev/null @@ -1,17 +0,0 @@ -# pod_affinity - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_affinity.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_affinity.k) - -This is the pod\_affinity module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodAffinity - -Pod affinity is a group of inter pod affinity scheduling rules. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**preferredDuringSchedulingIgnoredDuringExecution**
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.|[[v1.WeightedPodAffinityTerm](doc_weighted_pod_affinity_term#schema-weightedpodaffinityterm)]|Undefined|optional| -|**requiredDuringSchedulingIgnoredDuringExecution**
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.|[[v1.PodAffinityTerm](doc_pod_affinity_term#schema-podaffinityterm)]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_affinity_term.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_affinity_term.md deleted file mode 100644 index 4bf2d355..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_affinity_term.md +++ /dev/null @@ -1,19 +0,0 @@ -# pod_affinity_term - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_affinity_term.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_affinity_term.k) - -This is the pod\_affinity\_term module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodAffinityTerm - -Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \<topologyKey\> matches that of any node on which a pod of the set of pods is running - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**namespaces**
namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"|[str]|Undefined|optional| -|**topologyKey**
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.|str|Undefined|**required**| -|**labelSelector**
A label query over a set of resources, in this case pods.|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| -|**namespaceSelector**
A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_anti_affinity.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_anti_affinity.md deleted file mode 100644 index d410c480..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_anti_affinity.md +++ /dev/null @@ -1,17 +0,0 @@ -# pod_anti_affinity - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_anti_affinity.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_anti_affinity.k) - -This is the pod\_anti\_affinity module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodAntiAffinity - -Pod anti affinity is a group of inter pod anti affinity scheduling rules. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**preferredDuringSchedulingIgnoredDuringExecution**
The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.|[[v1.WeightedPodAffinityTerm](doc_weighted_pod_affinity_term#schema-weightedpodaffinityterm)]|Undefined|optional| -|**requiredDuringSchedulingIgnoredDuringExecution**
If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.|[[v1.PodAffinityTerm](doc_pod_affinity_term#schema-podaffinityterm)]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_dns_config.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_dns_config.md deleted file mode 100644 index 68de04dd..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_dns_config.md +++ /dev/null @@ -1,18 +0,0 @@ -# pod_dns_config - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_dns_config.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_dns_config.k) - -This is the pod\_dns\_config module in kusion\_kubernetes.api.core.v1 package.
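For the PodAffinity/PodAntiAffinity/PodAffinityTerm schemas above, a minimal KCL sketch might look like the following; the import path, the WeightedPodAffinityTerm and LabelSelector usage (documented in companion modules), and the label/weight values are assumptions:

```python
# Illustrative sketch only: the import path, labels and weight are assumptions.
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

# Prefer spreading replicas of the same app across nodes.
antiAffinity = corev1.PodAntiAffinity {
    preferredDuringSchedulingIgnoredDuringExecution = [
        corev1.WeightedPodAffinityTerm {
            weight = 100
            podAffinityTerm = corev1.PodAffinityTerm {
                topologyKey = "kubernetes.io/hostname"
                labelSelector.matchLabels = {"app": "my-service"}
            }
        }
    ]
}
```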
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodDNSConfig - -PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**nameservers**
A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.|[str]|Undefined|optional| -|**options**
A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.|[[v1.PodDNSConfigOption](doc_pod_dns_config_option#schema-poddnsconfigoption)]|Undefined|optional| -|**searches**
A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.|[str]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_dns_config_option.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_dns_config_option.md deleted file mode 100644 index 225d58f9..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_dns_config_option.md +++ /dev/null @@ -1,17 +0,0 @@ -# pod_dns_config_option - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_dns_config_option.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_dns_config_option.k) - -This is the pod\_dns\_config\_option module in kusion\_kubernetes.api.core.v1 package.
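A hedged KCL sketch of the PodDNSConfig schema above (together with PodDNSConfigOption, described next); the import path and all concrete values are assumptions:

```python
# Illustrative sketch only: the import path and values are assumptions.
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

dnsConfig = corev1.PodDNSConfig {
    nameservers = ["10.0.0.10"]
    searches = ["my-ns.svc.cluster.local"]
    options = [
        corev1.PodDNSConfigOption {
            name = "ndots"
            value = "2"
        }
    ]
}
```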
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodDNSConfigOption - -PodDNSConfigOption defines DNS resolver options of a pod. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Required.|str|Undefined|optional| -|**value**
value|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_readiness_gate.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_readiness_gate.md deleted file mode 100644 index 5016f99d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_readiness_gate.md +++ /dev/null @@ -1,16 +0,0 @@ -# pod_readiness_gate - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_readiness_gate.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_readiness_gate.k) - -This is the pod\_readiness\_gate module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodReadinessGate - -PodReadinessGate contains the reference to a pod condition - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**conditionType**
ConditionType refers to a condition in the pod's condition list with matching type.|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_security_context.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_security_context.md deleted file mode 100644 index 7c330c8e..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_security_context.md +++ /dev/null @@ -1,25 +0,0 @@ -# pod_security_context - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_security_context.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_security_context.k) - -This is the pod\_security\_context module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodSecurityContext - -PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsGroup**
A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:|int|Undefined|optional| -|**fsGroupChangePolicy**|str|Undefined|optional| -|**runAsGroup**|int|Undefined|optional| -|**runAsNonRoot**|bool|Undefined|optional| -|**runAsUser**|int|Undefined|optional| -|**supplementalGroups**|[int]|Undefined|optional| -|**sysctls**|[[v1.Sysctl](doc_sysctl#schema-sysctl)]|Undefined|optional| -|**seLinuxOptions**|[SELinuxOptions](doc_se_linux_options#schema-selinuxoptions)|Undefined|optional| -|**seccompProfile**|[SeccompProfile](doc_seccomp_profile#schema-seccompprofile)|Undefined|optional| -|**windowsOptions**|[WindowsSecurityContextOptions](doc_windows_security_context_options#schema-windowssecuritycontextoptions)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_spec.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_spec.md deleted file mode 100644 index 92ebfac4..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_spec.md +++ /dev/null @@ -1,50 +0,0 @@ -# pod_spec - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_spec.k) - -This is the pod\_spec module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodSpec - -PodSpec is a description of a pod. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**activeDeadlineSeconds**
Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.|int|Undefined|optional| -|**automountServiceAccountToken**
AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.|bool|Undefined|optional| -|**containers**
List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.|[[v1.Container](doc_container#schema-container)]|Undefined|**required**| -|**dnsPolicy**
Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.|str|Undefined|optional| -|**enableServiceLinks**
EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.|bool|Undefined|optional| -|**ephemeralContainers**
List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.|[[v1.EphemeralContainer](doc_ephemeral_container#schema-ephemeralcontainer)]|Undefined|optional| -|**hostAliases**
HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.|[[v1.HostAlias](doc_host_alias#schema-hostalias)]|Undefined|optional| -|**hostIPC**
Use the host's ipc namespace. Optional: Default to false.|bool|Undefined|optional| -|**hostNetwork**
Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.|bool|Undefined|optional| -|**hostPID**
Use the host's pid namespace. Optional: Default to false.|bool|Undefined|optional| -|**hostname**
Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.|str|Undefined|optional| -|**imagePullSecrets**
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images\#specifying-imagepullsecrets-on-a-pod|[[v1.LocalObjectReference](doc_local_object_reference#schema-localobjectreference)]|Undefined|optional| -|**initContainers**
List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/|[[v1.Container](doc_container#schema-container)]|Undefined|optional| -|**nodeName**
NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.|str|Undefined|optional| -|**nodeSelector**
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/|{str: str}|Undefined|optional| -|**overhead**
Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.|{str: str}|Undefined|optional| -|**preemptionPolicy**
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.|str|Undefined|optional| -|**priority**
The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.|int|Undefined|optional| -|**priorityClassName**
If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.|str|Undefined|optional| -|**readinessGates**
If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates|[[v1.PodReadinessGate](doc_pod_readiness_gate#schema-podreadinessgate)]|Undefined|optional| -|**restartPolicy**
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/\#restart-policy|str|Undefined|optional| -|**runtimeClassName**
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.|str|Undefined|optional| -|**schedulerName**
If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.|str|Undefined|optional| -|**serviceAccount**
DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.|str|Undefined|optional| -|**serviceAccountName**
ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/|str|Undefined|optional| -|**setHostnameAsFQDN**
If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY\_LOCAL\_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.|bool|Undefined|optional| -|**shareProcessNamespace**
Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.|bool|Undefined|optional| -|**subdomain**
If specified, the fully qualified Pod hostname will be "\<hostname\>.\<subdomain\>.\<pod namespace\>.svc.\<cluster domain\>". If not specified, the pod will not have a domain name at all.|str|Undefined|optional| -|**terminationGracePeriodSeconds**
Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.|int|Undefined|optional| -|**tolerations**
If specified, the pod's tolerations.|[[v1.Toleration](doc_toleration#schema-toleration)]|Undefined|optional| -|**topologySpreadConstraints**
TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.|[[v1.TopologySpreadConstraint](doc_topology_spread_constraint#schema-topologyspreadconstraint)]|Undefined|optional| -|**volumes**
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes|[[v1.Volume](doc_volume#schema-volume)]|Undefined|optional| -|**affinity**
If specified, the pod's scheduling constraints|[Affinity](doc_affinity#schema-affinity)|Undefined|optional| -|**dnsConfig**
Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.|[PodDNSConfig](doc_pod_dns_config#schema-poddnsconfig)|Undefined|optional| -|**securityContext**
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.|[PodSecurityContext](doc_pod_security_context#schema-podsecuritycontext)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_template_spec.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_template_spec.md deleted file mode 100644 index 97dce9f3..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_pod_template_spec.md +++ /dev/null @@ -1,17 +0,0 @@ -# pod_template_spec - -Source: [base/pkg/kusion_kubernetes/api/core/v1/pod_template_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/pod_template_spec.k) - -This is the pod\_template\_spec module in kusion\_kubernetes.api.core.v1 package.
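To ground the PodSpec schema above, here is a minimal KCL sketch; the import path, container name and image are illustrative assumptions:

```python
# Illustrative sketch only: the import path, image and names are assumptions.
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

podSpec = corev1.PodSpec {
    restartPolicy = "Always"
    terminationGracePeriodSeconds = 30
    # containers is the only required attribute of PodSpec.
    containers = [
        corev1.Container {
            name = "app"
            image = "nginx:1.25"
        }
    ]
}
```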
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PodTemplateSpec - -PodTemplateSpec describes the data a pod should have when created from a template - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[PodSpec](doc_pod_spec#schema-podspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_portworx_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_portworx_volume_source.md deleted file mode 100644 index a9575593..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_portworx_volume_source.md +++ /dev/null @@ -1,18 +0,0 @@ -# portworx_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/portworx_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/portworx_volume_source.k) - -This is the portworx\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PortworxVolumeSource - -PortworxVolumeSource represents a Portworx volume resource. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
FSType represents the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.|str|Undefined|optional| -|**readOnly**
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|Undefined|optional| -|**volumeID**
VolumeID uniquely identifies a Portworx volume|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_preferred_scheduling_term.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_preferred_scheduling_term.md deleted file mode 100644 index 29ca99b0..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_preferred_scheduling_term.md +++ /dev/null @@ -1,17 +0,0 @@ -# preferred_scheduling_term - -Source: [base/pkg/kusion_kubernetes/api/core/v1/preferred_scheduling_term.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/preferred_scheduling_term.k) - -This is the preferred\_scheduling\_term module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PreferredSchedulingTerm - -An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**weight**
Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.|int|Undefined|**required**| -|**preference**
A node selector term, associated with the corresponding weight.|[NodeSelectorTerm](doc_node_selector_term#schema-nodeselectorterm)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_probe.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_probe.md deleted file mode 100644 index b69ef6a4..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_probe.md +++ /dev/null @@ -1,24 +0,0 @@ -# probe - -Source: [base/pkg/kusion_kubernetes/api/core/v1/probe.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/probe.k) - -This is the probe module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Probe - -Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**failureThreshold**
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.|int|Undefined|optional| -|**initialDelaySeconds**
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| -|**periodSeconds**
How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.|int|Undefined|optional| -|**successThreshold**
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.|int|Undefined|optional| -|**terminationGracePeriodSeconds**
Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.|int|Undefined|optional| -|**timeoutSeconds**
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| -|**exec**
One and only one of the following should be specified. Exec specifies the action to take.|[ExecAction](doc_exec_action#schema-execaction)|Undefined|optional| -|**httpGet**
HTTPGet specifies the http request to perform.|[HTTPGetAction](doc_http_get_action#schema-httpgetaction)|Undefined|optional| -|**tcpSocket**
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported|[TCPSocketAction](doc_tcp_socket_action#schema-tcpsocketaction)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_projected_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_projected_volume_source.md deleted file mode 100644 index a62d0784..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_projected_volume_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# projected_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/projected_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/projected_volume_source.k) - -This is the projected\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
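A hedged KCL sketch of the Probe schema above, using an exec action; the import path, the ExecAction usage (documented in a companion module) and the command are assumptions:

```python
# Illustrative sketch only: the import path and command are assumptions.
import base.pkg.kusion_kubernetes.api.core.v1 as corev1

livenessProbe = corev1.Probe {
    initialDelaySeconds = 5
    periodSeconds = 10
    failureThreshold = 3
    # Exactly one action (exec, httpGet or tcpSocket) should be specified.
    exec = corev1.ExecAction {
        command = ["cat", "/tmp/healthy"]
    }
}
```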
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ProjectedVolumeSource - -Represents a projected volume source - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**defaultMode**
Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.|int|Undefined|optional| -|**sources**
list of volume projections|[[v1.VolumeProjection](doc_volume_projection#schema-volumeprojection)]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_quobyte_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_quobyte_volume_source.md deleted file mode 100644 index 5e826327..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_quobyte_volume_source.md +++ /dev/null @@ -1,21 +0,0 @@ -# quobyte_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/quobyte_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/quobyte_volume_source.k) - -This is the quobyte\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema QuobyteVolumeSource - -Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**group**
Group to map volume access to. Default is no group|str|Undefined|optional| -|**readOnly**
ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.|bool|Undefined|optional| -|**registry**
Registry represents a single or multiple Quobyte Registry services specified as a string of host:port pairs (multiple entries are separated with commas) which acts as the central registry for volumes|str|Undefined|**required**| -|**tenant**
Tenant owning the given Quobyte volume in the Backend. Used with dynamically provisioned Quobyte volumes; value is set by the plugin|str|Undefined|optional| -|**user**
User to map volume access to. Defaults to serviceaccount user|str|Undefined|optional| -|**volume**
Volume is a string that references an already created Quobyte volume by name.|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_rbd_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_rbd_volume_source.md deleted file mode 100644 index 83c6aa8b..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_rbd_volume_source.md +++ /dev/null @@ -1,23 +0,0 @@ -# rbd_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/rbd_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/rbd_volume_source.k) - -This is the rbd\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema RBDVolumeSource - -Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes\#rbd|str|Undefined|optional| -|**image**
The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md\#how-to-use-it|str|Undefined|**required**| -|**keyring**
Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md\#how-to-use-it|str|Undefined|optional| -|**monitors**
A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md\#how-to-use-it|[str]|Undefined|**required**| -|**pool**
The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md\#how-to-use-it|str|Undefined|optional| -|**readOnly**
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md\#how-to-use-it|bool|Undefined|optional| -|**user**
The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md\#how-to-use-it|str|Undefined|optional| -|**secretRef**
SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md\#how-to-use-it|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_resource_field_selector.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_resource_field_selector.md deleted file mode 100644 index 46db4732..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_resource_field_selector.md +++ /dev/null @@ -1,18 +0,0 @@ -# resource_field_selector - -Source: [base/pkg/kusion_kubernetes/api/core/v1/resource_field_selector.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/resource_field_selector.k) - -This is the resource\_field\_selector module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ResourceFieldSelector - -ResourceFieldSelector represents container resources (cpu, memory) and their output format - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**containerName**
Container name: required for volumes, optional for env vars|str|Undefined|optional| -|**divisor**
Specifies the output format of the exposed resources, defaults to "1"|str|Undefined|optional| -|**resource**
Required: resource to select|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_resource_requirements.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_resource_requirements.md deleted file mode 100644 index 782e675a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_resource_requirements.md +++ /dev/null @@ -1,17 +0,0 @@ -# resource_requirements - -Source: [base/pkg/kusion_kubernetes/api/core/v1/resource_requirements.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/resource_requirements.k) - -This is the resource\_requirements module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ResourceRequirements - -ResourceRequirements describes the compute resource requirements. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**limits**
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/|{str: str}|Undefined|optional| -|**requests**
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/|{str: str}|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_scale_io_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_scale_io_volume_source.md deleted file mode 100644 index 1c1c9f91..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_scale_io_volume_source.md +++ /dev/null @@ -1,25 +0,0 @@ -# scale_io_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/scale_io_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/scale_io_volume_source.k) - -This is the scale\_io\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ScaleIOVolumeSource - -ScaleIOVolumeSource represents a persistent ScaleIO volume - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".|str|Undefined|optional| -|**gateway**
The host address of the ScaleIO API Gateway.|str|Undefined|**required**| -|**protectionDomain**
The name of the ScaleIO Protection Domain for the configured storage.|str|Undefined|optional| -|**readOnly**
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|Undefined|optional| -|**sslEnabled**
Flag to enable/disable SSL communication with Gateway, default false|bool|Undefined|optional| -|**storageMode**
Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.|str|Undefined|optional| -|**storagePool**
The ScaleIO Storage Pool associated with the protection domain.|str|Undefined|optional| -|**system**
The name of the storage system as configured in ScaleIO.|str|Undefined|**required**| -|**volumeName**
The name of a volume already created in the ScaleIO system that is associated with this volume source.|str|Undefined|optional| -|**secretRef**
SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_se_linux_options.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_se_linux_options.md deleted file mode 100644 index 9e01315d..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_se_linux_options.md +++ /dev/null @@ -1,19 +0,0 @@ -# se_linux_options - -Source: [base/pkg/kusion_kubernetes/api/core/v1/se_linux_options.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/se_linux_options.k) - -This is the se\_linux\_options module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SELinuxOptions - -SELinuxOptions are the labels to be applied to the container - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**level**
Level is SELinux level label that applies to the container.|str|Undefined|optional| -|**role**
Role is a SELinux role label that applies to the container.|str|Undefined|optional| -|**type**
Type is a SELinux type label that applies to the container.|str|Undefined|optional| -|**user**
User is a SELinux user label that applies to the container.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_seccomp_profile.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_seccomp_profile.md deleted file mode 100644 index 682deb39..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_seccomp_profile.md +++ /dev/null @@ -1,17 +0,0 @@ -# seccomp_profile - -Source: [base/pkg/kusion_kubernetes/api/core/v1/seccomp_profile.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/seccomp_profile.k) - -This is the seccomp\_profile module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SeccompProfile - -SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**localhostProfile**
localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".
$type : str, default is Undefined, required
type indicates which kind of seccomp profile will be applied. Valid options are:|str|Undefined|optional| -|**type**|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret.md deleted file mode 100644 index a47f57d5..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret.md +++ /dev/null @@ -1,22 +0,0 @@ -# secret - -Source: [base/pkg/kusion_kubernetes/api/core/v1/secret.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/secret.k) - -This is the secret module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Secret - -Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"v1"|"v1"|**required**| -|**data**
Data contains the secret data. Each key must consist of alphanumeric characters, '-', '\_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648\#section-4|{str: str}|Undefined|optional| -|**immutable**
Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.|bool|Undefined|optional| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Secret"|"Secret"|**required**| -|**stringData**
stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API.|{str: str}|Undefined|optional| -|**type**
Used to facilitate programmatic handling of secret data.|str|Undefined|optional| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_env_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_env_source.md deleted file mode 100644 index 0427c84c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_env_source.md +++ /dev/null @@ -1,17 +0,0 @@ -# secret_env_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/secret_env_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/secret_env_source.k) - -This is the secret\_env\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SecretEnvSource - -SecretEnvSource selects a Secret to populate the environment variables with.
The contents of the target Secret's Data field will represent the key-value pairs as environment variables. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names
optional : bool, default is Undefined, optional
Specify whether the Secret must be defined|str|Undefined|optional| -|**optional**|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_key_selector.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_key_selector.md deleted file mode 100644 index b5c99448..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_key_selector.md +++ /dev/null @@ -1,18 +0,0 @@ -# secret_key_selector - -Source: [base/pkg/kusion_kubernetes/api/core/v1/secret_key_selector.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/secret_key_selector.k) - -This is the secret\_key\_selector module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SecretKeySelector - -SecretKeySelector selects a key of a Secret. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**key**
The key of the secret to select from. Must be a valid secret key.|str|Undefined|**required**| -|**name**
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|optional| -|**optional**
Specify whether the Secret or its key must be defined|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_projection.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_projection.md deleted file mode 100644 index c3079928..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_projection.md +++ /dev/null @@ -1,18 +0,0 @@ -# secret_projection - -Source: [base/pkg/kusion_kubernetes/api/core/v1/secret_projection.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/secret_projection.k) - -This is the secret\_projection module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SecretProjection - -Adapts a secret into a projected volume.
The contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**items**
If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
name : str, default is Undefined, optional
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names
optional : bool, default is Undefined, optional
Specify whether the Secret or its key must be defined|[[v1.KeyToPath](doc_key_to_path#schema-keytopath)]|Undefined|optional| -|**name**|str|Undefined|optional| -|**optional**|bool|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_volume_source.md deleted file mode 100644 index 0a1f8d7a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_secret_volume_source.md +++ /dev/null @@ -1,19 +0,0 @@ -# secret_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/secret_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/secret_volume_source.k) - -This is the secret\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SecretVolumeSource - -Adapts a Secret into a volume.
The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**defaultMode**
Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
items : [KeyToPath], default is Undefined, optional
If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
optional : bool, default is Undefined, optional
Specify whether the Secret or its keys must be defined
secretName : str, default is Undefined, optional
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes\#secret|int|Undefined|optional| -|**items**|[[v1.KeyToPath](doc_key_to_path#schema-keytopath)]|Undefined|optional| -|**optional**|bool|Undefined|optional| -|**secretName**|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_security_context.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_security_context.md deleted file mode 100644 index c7a8b34b..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_security_context.md +++ /dev/null @@ -1,26 +0,0 @@ -# security_context - -Source: [base/pkg/kusion_kubernetes/api/core/v1/security_context.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/security_context.k) - -This is the security\_context module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SecurityContext - -SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**allowPrivilegeEscalation**
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no\_new\_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container is: 1) run as Privileged 2) has CAP\_SYS\_ADMIN|bool|Undefined|optional| -|**privileged**
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.|bool|Undefined|optional| -|**procMount**
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.|str|Undefined|optional| -|**readOnlyRootFilesystem**
Whether this container has a read-only root filesystem. Default is false.|bool|Undefined|optional| -|**runAsGroup**
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.|int|Undefined|optional| -|**runAsNonRoot**
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.|bool|Undefined|optional| -|**runAsUser**
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.|int|Undefined|optional| -|**capabilities**
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.|[Capabilities](doc_capabilities#schema-capabilities)|Undefined|optional| -|**seLinuxOptions**
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.|[SELinuxOptions](doc_se_linux_options#schema-selinuxoptions)|Undefined|optional| -|**seccompProfile**
The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options.|[SeccompProfile](doc_seccomp_profile#schema-seccompprofile)|Undefined|optional| -|**windowsOptions**
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.|[WindowsSecurityContextOptions](doc_windows_security_context_options#schema-windowssecuritycontextoptions)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service.md deleted file mode 100644 index 34b6fa8b..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service.md +++ /dev/null @@ -1,19 +0,0 @@ -# service - -Source: [base/pkg/kusion_kubernetes/api/core/v1/service.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/service.k) - -This is the service module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Service - -Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"v1"|"v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Service"|"Service"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[ServiceSpec](doc_service_spec#schema-servicespec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_account.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_account.md deleted file mode 100644 index 89a89865..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_account.md +++ /dev/null @@ -1,21 +0,0 @@ -# service_account - -Source: [base/pkg/kusion_kubernetes/api/core/v1/service_account.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/service_account.k) - -This is the service\_account module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ServiceAccount - -ServiceAccount binds together: \* a name, understood by users, and perhaps by peripheral systems, for an identity \* a principal that can be authenticated and authorized \* a set of secrets - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"v1"|"v1"|**required**| -|**automountServiceAccountToken**
AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.|bool|Undefined|optional| -|**imagePullSecrets**
ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/\#specifying-imagepullsecrets-on-a-pod|[[v1.LocalObjectReference](doc_local_object_reference#schema-localobjectreference)]|Undefined|optional| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"ServiceAccount"|"ServiceAccount"|**required**| -|**secrets**
Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret|[[v1.ObjectReference](doc_object_reference#schema-objectreference)]|Undefined|optional| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_account_token_projection.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_account_token_projection.md deleted file mode 100644 index 37c9d049..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_account_token_projection.md +++ /dev/null @@ -1,18 +0,0 @@ -# service_account_token_projection - -Source: [base/pkg/kusion_kubernetes/api/core/v1/service_account_token_projection.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/service_account_token_projection.k) - -This is the service\_account\_token\_projection module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ServiceAccountTokenProjection - -ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise). - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**audience**
Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.|str|Undefined|optional| -|**expirationSeconds**
ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.|int|Undefined|optional| -|**path**
Path is the path relative to the mount point of the file to project the token into.|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_port.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_port.md deleted file mode 100644 index af7b4465..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_port.md +++ /dev/null @@ -1,21 +0,0 @@ -# service_port - -Source: [base/pkg/kusion_kubernetes/api/core/v1/service_port.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/service_port.k) - -This is the service\_port module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ServicePort - -ServicePort contains information on service's port. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**appProtocol**
The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.|str|Undefined|optional| -|**name**
The name of this port within the service. This must be a DNS\_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.|str|Undefined|optional| -|**nodePort**
The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/\#type-nodeport|int|Undefined|optional| -|**port**
The port that will be exposed by this service.|int|Undefined|**required**| -|**protocol**
The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". Default is TCP.|str|Undefined|optional| -|**targetPort**
Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA\_SVC\_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/\#defining-a-service|int \| str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_spec.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_spec.md deleted file mode 100644 index 531defdf..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_service_spec.md +++ /dev/null @@ -1,34 +0,0 @@ -# service_spec - -Source: [base/pkg/kusion_kubernetes/api/core/v1/service_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/service_spec.k) - -This is the service\_spec module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ServiceSpec - -ServiceSpec describes the attributes that a user creates on a service. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**allocateLoadBalancerNodePorts**
allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is "true". It may be set to "false" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
clusterIP : str, default is Undefined, optional
clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are "None", empty string (""), or a valid IP address. Setting this to "None" makes a "headless service" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/\#virtual-ips-and-service-proxies
clusterIPs : [str], default is Undefined, optional
ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are "None", empty string (""), or a valid IP address. Setting this to "None" makes a "headless service" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.|bool|Undefined|optional| -|**clusterIP**|str|Undefined|optional| -|**clusterIPs**|[str]|Undefined|optional| -|**externalIPs**|[str]|Undefined|optional| -|**externalName**|str|Undefined|optional| -|**externalTrafficPolicy**|str|Undefined|optional| -|**healthCheckNodePort**|int|Undefined|optional| -|**internalTrafficPolicy**|str|Undefined|optional| -|**ipFamilies**|[str]|Undefined|optional| -|**ipFamilyPolicy**|str|Undefined|optional| -|**loadBalancerClass**|str|Undefined|optional| -|**loadBalancerIP**|str|Undefined|optional| -|**loadBalancerSourceRanges**|[str]|Undefined|optional| -|**ports**|[[v1.ServicePort](doc_service_port#schema-serviceport)]|Undefined|optional| -|**publishNotReadyAddresses**|bool|Undefined|optional| -|**selector**|{str: str}|Undefined|optional| -|**sessionAffinity**|str|Undefined|optional| -|**type**|str|Undefined|optional| -|**sessionAffinityConfig**|[SessionAffinityConfig](doc_session_affinity_config#schema-sessionaffinityconfig)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_session_affinity_config.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_session_affinity_config.md deleted file mode 100644 index 29dfe0ef..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_session_affinity_config.md +++ /dev/null @@ -1,16 +0,0 @@ -# session_affinity_config - -Source: [base/pkg/kusion_kubernetes/api/core/v1/session_affinity_config.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/session_affinity_config.k) - -This is the session\_affinity\_config module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema SessionAffinityConfig - -SessionAffinityConfig represents the configurations of session affinity. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**clientIP**
clientIP contains the configurations of Client IP based session affinity.|[ClientIPConfig](doc_client_ip_config#schema-clientipconfig)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_storage_os_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_storage_os_volume_source.md deleted file mode 100644 index 68a1ccde..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_storage_os_volume_source.md +++ /dev/null @@ -1,20 +0,0 @@ -# storage_os_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/storage_os_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/storage_os_volume_source.k) - -This is the storage\_os\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema StorageOSVolumeSource - -Represents a StorageOS persistent volume resource. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.|str|Undefined|optional| -|**readOnly**
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|Undefined|optional| -|**volumeName**
VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.|str|Undefined|optional| -|**volumeNamespace**
VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.|str|Undefined|optional| -|**secretRef**
SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.|[LocalObjectReference](doc_local_object_reference#schema-localobjectreference)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_sysctl.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_sysctl.md deleted file mode 100644 index 4c24dfec..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_sysctl.md +++ /dev/null @@ -1,17 +0,0 @@ -# sysctl - -Source: [base/pkg/kusion_kubernetes/api/core/v1/sysctl.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/sysctl.k) - -This is the sysctl module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Sysctl - -Sysctl defines a kernel parameter to be set - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Name of a property to set|str|Undefined|**required**| -|**value**
Value of a property to set|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_taint.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_taint.md deleted file mode 100644 index 60a35287..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_taint.md +++ /dev/null @@ -1,19 +0,0 @@ -# taint - -Source: [base/pkg/kusion_kubernetes/api/core/v1/taint.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/taint.k) - -This is the taint module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Taint - -The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**effect**
Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.|str|Undefined|**required**| -|**key**
Required. The taint key to be applied to a node.|str|Undefined|**required**| -|**timeAdded**
TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.|str|Undefined|optional| -|**value**
The taint value corresponding to the taint key.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_tcp_socket_action.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_tcp_socket_action.md deleted file mode 100644 index 456be470..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_tcp_socket_action.md +++ /dev/null @@ -1,17 +0,0 @@ -# tcp_socket_action - -Source: [base/pkg/kusion_kubernetes/api/core/v1/tcp_socket_action.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/tcp_socket_action.k) - -This is the tcp\_socket\_action module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema TCPSocketAction - -TCPSocketAction describes an action based on opening a socket - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**host**
Optional: Host name to connect to, defaults to the pod IP.|str|Undefined|optional| -|**port**
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA\_SVC\_NAME.|int \| str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_toleration.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_toleration.md deleted file mode 100644 index a8a5d0d8..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_toleration.md +++ /dev/null @@ -1,20 +0,0 @@ -# toleration - -Source: [base/pkg/kusion_kubernetes/api/core/v1/toleration.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/toleration.k) - -This is the toleration module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Toleration - -The pod this Toleration is attached to tolerates any taint that matches the triple \ using the matching operator \. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**effect**
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.|str|Undefined|optional| -|**key**
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.|str|Undefined|optional| -|**operator**
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.|str|Undefined|optional| -|**tolerationSeconds**
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.|int|Undefined|optional| -|**value**
Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_topology_spread_constraint.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_topology_spread_constraint.md deleted file mode 100644 index afdb8820..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_topology_spread_constraint.md +++ /dev/null @@ -1,19 +0,0 @@ -# topology_spread_constraint - -Source: [base/pkg/kusion_kubernetes/api/core/v1/topology_spread_constraint.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/topology_spread_constraint.k) - -This is the topology\_spread\_constraint module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema TopologySpreadConstraint - -TopologySpreadConstraint specifies how to spread matching pods among the given topology. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**maxSkew**
MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.
topologyKey : str, default is Undefined, required
TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \ as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
whenUnsatisfiable : str, default is Undefined, required
WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,|int|Undefined|**required**| -|**topologyKey**|str|Undefined|**required**| -|**whenUnsatisfiable**|str|Undefined|**required**| -|**labelSelector**|[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_typed_local_object_reference.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_typed_local_object_reference.md deleted file mode 100644 index fabc7874..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_typed_local_object_reference.md +++ /dev/null @@ -1,18 +0,0 @@ -# typed_local_object_reference - -Source: [base/pkg/kusion_kubernetes/api/core/v1/typed_local_object_reference.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/typed_local_object_reference.k) - -This is the typed\_local\_object\_reference module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema TypedLocalObjectReference - -TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiGroup**
APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.|str|Undefined|optional| -|**kind**
Kind is the type of resource being referenced|str|Undefined|**required**| -|**name**
Name is the name of resource being referenced|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume.md deleted file mode 100644 index 3eb6ab02..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume.md +++ /dev/null @@ -1,45 +0,0 @@ -# volume - -Source: [base/pkg/kusion_kubernetes/api/core/v1/volume.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/volume.k) - -This is the volume module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Volume - -Volume represents a named volume in a pod that may be accessed by any container in the pod. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Volume's name. Must be a DNS\_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names
awsElasticBlockStore : AWSElasticBlockStoreVolumeSource, default is Undefined, optional
AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes\#awselasticblockstore
azureDisk : AzureDiskVolumeSource, default is Undefined, optional
AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
azureFile : AzureFileVolumeSource, default is Undefined, optional
AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
cephfs : CephFSVolumeSource, default is Undefined, optional
CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
cinder : CinderVolumeSource, default is Undefined, optional
Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md
configMap : ConfigMapVolumeSource, default is Undefined, optional
ConfigMap represents a configMap that should populate this volume
csi : CSIVolumeSource, default is Undefined, optional
CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
downwardAPI : DownwardAPIVolumeSource, default is Undefined, optional
DownwardAPI represents downward API about the pod that should populate this volume
emptyDir : EmptyDirVolumeSource, default is Undefined, optional
EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes\#emptydir
ephemeral : EphemeralVolumeSource, default is Undefined, optional
Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.|str|Undefined|**required**| -|**awsElasticBlockStore**|[AWSElasticBlockStoreVolumeSource](doc_aws_elastic_block_store_volume_source#schema-awselasticblockstorevolumesource)|Undefined|optional| -|**azureDisk**|[AzureDiskVolumeSource](doc_azure_disk_volume_source#schema-azurediskvolumesource)|Undefined|optional| -|**azureFile**|[AzureFileVolumeSource](doc_azure_file_volume_source#schema-azurefilevolumesource)|Undefined|optional| -|**cephfs**|[CephFSVolumeSource](doc_ceph_fs_volume_source#schema-cephfsvolumesource)|Undefined|optional| -|**cinder**|[CinderVolumeSource](doc_cinder_volume_source#schema-cindervolumesource)|Undefined|optional| -|**configMap**|[ConfigMapVolumeSource](doc_config_map_volume_source#schema-configmapvolumesource)|Undefined|optional| -|**csi**|[CSIVolumeSource](doc_csi_volume_source#schema-csivolumesource)|Undefined|optional| -|**downwardAPI**|[DownwardAPIVolumeSource](doc_downward_api_volume_source#schema-downwardapivolumesource)|Undefined|optional| -|**emptyDir**|[EmptyDirVolumeSource](doc_empty_dir_volume_source#schema-emptydirvolumesource)|Undefined|optional| -|**ephemeral**|[EphemeralVolumeSource](doc_ephemeral_volume_source#schema-ephemeralvolumesource)|Undefined|optional| -|**fc**|[FCVolumeSource](doc_fc_volume_source#schema-fcvolumesource)|Undefined|optional| -|**flexVolume**|[FlexVolumeSource](doc_flex_volume_source#schema-flexvolumesource)|Undefined|optional| -|**flocker**|[FlockerVolumeSource](doc_flocker_volume_source#schema-flockervolumesource)|Undefined|optional| -|**gcePersistentDisk**|[GCEPersistentDiskVolumeSource](doc_gce_persistent_disk_volume_source#schema-gcepersistentdiskvolumesource)|Undefined|optional| -|**gitRepo**|[GitRepoVolumeSource](doc_git_repo_volume_source#schema-gitrepovolumesource)|Undefined|optional| -|**glusterfs**|[GlusterfsVolumeSource](doc_glusterfs_volume_source#schema-glusterfsvolumesource)|Undefined|optional| -|**hostPath**|[HostPathVolumeSource](doc_host_path_volume_source#schema-hostpathvolumesource)|Undefined|optional| -|**iscsi**|[ISCSIVolumeSource](doc_iscsi_volume_source#schema-iscsivolumesource)|Undefined|optional| -|**nfs**|[NFSVolumeSource](doc_nfs_volume_source#schema-nfsvolumesource)|Undefined|optional| -|**persistentVolumeClaim**|[PersistentVolumeClaimVolumeSource](doc_persistent_volume_claim_volume_source#schema-persistentvolumeclaimvolumesource)|Undefined|optional| -|**photonPersistentDisk**|[PhotonPersistentDiskVolumeSource](doc_photon_persistent_disk_volume_source#schema-photonpersistentdiskvolumesource)|Undefined|optional| -|**portworxVolume**|[PortworxVolumeSource](doc_portworx_volume_source#schema-portworxvolumesource)|Undefined|optional| -|**projected**|[ProjectedVolumeSource](doc_projected_volume_source#schema-projectedvolumesource)|Undefined|optional| -|**quobyte**|[QuobyteVolumeSource](doc_quobyte_volume_source#schema-quobytevolumesource)|Undefined|optional| -|**rbd**|[RBDVolumeSource](doc_rbd_volume_source#schema-rbdvolumesource)|Undefined|optional| -|**scaleIO**|[ScaleIOVolumeSource](doc_scale_io_volume_source#schema-scaleiovolumesource)|Undefined|optional| -|**secret**|[SecretVolumeSource](doc_secret_volume_source#schema-secretvolumesource)|Undefined|optional| -|**storageos**|[StorageOSVolumeSource](doc_storage_os_volume_source#schema-storageosvolumesource)|Undefined|optional| 
-|**vsphereVolume**|[VsphereVirtualDiskVolumeSource](doc_vsphere_virtual_disk_volume_source#schema-vspherevirtualdiskvolumesource)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_device.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_device.md deleted file mode 100644 index c968b66b..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_device.md +++ /dev/null @@ -1,17 +0,0 @@ -# volume_device - -Source: [base/pkg/kusion_kubernetes/api/core/v1/volume_device.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/volume_device.k) - -This is the volume\_device module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema VolumeDevice - -volumeDevice describes a mapping of a raw block device within a container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**devicePath**
devicePath is the path inside of the container that the device will be mapped to.|str|Undefined|**required**| -|**name**
name must match the name of a persistentVolumeClaim in the pod|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_mount.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_mount.md deleted file mode 100644 index 57c4e0ed..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_mount.md +++ /dev/null @@ -1,21 +0,0 @@ -# volume_mount - -Source: [base/pkg/kusion_kubernetes/api/core/v1/volume_mount.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/volume_mount.k) - -This is the volume\_mount module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema VolumeMount - -VolumeMount describes a mounting of a Volume within a container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**mountPath**
Path within the container at which the volume should be mounted. Must not contain ':'.|str|Undefined|**required**| -|**mountPropagation**
mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.|str|Undefined|optional| -|**name**
This must match the Name of a Volume.|str|Undefined|**required**| -|**readOnly**
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.|bool|Undefined|optional| -|**subPath**
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).|str|Undefined|optional| -|**subPathExpr**
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR\_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_projection.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_projection.md deleted file mode 100644 index 95573829..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_volume_projection.md +++ /dev/null @@ -1,19 +0,0 @@ -# volume_projection - -Source: [base/pkg/kusion_kubernetes/api/core/v1/volume_projection.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/volume_projection.k) - -This is the volume\_projection module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema VolumeProjection - -Projection that may be projected along with other supported volume types - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**configMap**
information about the configMap data to project|[ConfigMapProjection](doc_config_map_projection#schema-configmapprojection)|Undefined|optional| -|**downwardAPI**
information about the downwardAPI data to project|[DownwardAPIProjection](doc_downward_api_projection#schema-downwardapiprojection)|Undefined|optional| -|**secret**
information about the secret data to project|[SecretProjection](doc_secret_projection#schema-secretprojection)|Undefined|optional| -|**serviceAccountToken**
information about the serviceAccountToken data to project|[ServiceAccountTokenProjection](doc_service_account_token_projection#schema-serviceaccounttokenprojection)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_vsphere_virtual_disk_volume_source.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_vsphere_virtual_disk_volume_source.md deleted file mode 100644 index e86fc5dd..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_vsphere_virtual_disk_volume_source.md +++ /dev/null @@ -1,19 +0,0 @@ -# vsphere_virtual_disk_volume_source - -Source: [base/pkg/kusion_kubernetes/api/core/v1/vsphere_virtual_disk_volume_source.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/vsphere_virtual_disk_volume_source.k) - -This is the vsphere\_virtual\_disk\_volume\_source module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema VsphereVirtualDiskVolumeSource - -Represents a vSphere volume resource. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fsType**
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.|str|Undefined|optional| -|**storagePolicyID**
Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.|str|Undefined|optional| -|**storagePolicyName**
Storage Policy Based Management (SPBM) profile name.|str|Undefined|optional| -|**volumePath**
Path that identifies vSphere volume vmdk|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_weighted_pod_affinity_term.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_weighted_pod_affinity_term.md deleted file mode 100644 index 9e8442a6..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_weighted_pod_affinity_term.md +++ /dev/null @@ -1,17 +0,0 @@ -# weighted_pod_affinity_term - -Source: [base/pkg/kusion_kubernetes/api/core/v1/weighted_pod_affinity_term.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/weighted_pod_affinity_term.k) - -This is the weighted\_pod\_affinity\_term module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema WeightedPodAffinityTerm - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**weight**
weight associated with matching the corresponding podAffinityTerm, in the range 1-100.|int|Undefined|**required**| -|**podAffinityTerm**
Required. A pod affinity term, associated with the corresponding weight.|[PodAffinityTerm](doc_pod_affinity_term#schema-podaffinityterm)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_windows_security_context_options.md b/docs/reference/model/kusion_kubernetes/api/core/v1/doc_windows_security_context_options.md deleted file mode 100644 index 7260a2b3..00000000 --- a/docs/reference/model/kusion_kubernetes/api/core/v1/doc_windows_security_context_options.md +++ /dev/null @@ -1,19 +0,0 @@ -# windows_security_context_options - -Source: [base/pkg/kusion_kubernetes/api/core/v1/windows_security_context_options.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/core/v1/windows_security_context_options.k) - -This is the windows\_security\_context\_options module in kusion\_kubernetes.api.core.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema WindowsSecurityContextOptions - -WindowsSecurityContextOptions contain Windows-specific options and credentials. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**gmsaCredentialSpec**
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.|str|Undefined|optional| -|**gmsaCredentialSpecName**
GMSACredentialSpecName is the name of the GMSA credential spec to use.|str|Undefined|optional| -|**hostProcess**
HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.|bool|Undefined|optional| -|**runAsUserName**
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_http_ingress_path.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_http_ingress_path.md deleted file mode 100644 index 3643e7fa..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_http_ingress_path.md +++ /dev/null @@ -1,18 +0,0 @@ -# http_ingress_path - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/http_ingress_path.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/http_ingress_path.k) - -This is the http\_ingress\_path module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HTTPIngressPath - -HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**path**
Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value "Exact" or "Prefix".
pathType : str, default is Undefined, required
PathType determines the interpretation of the Path matching. PathType can be one of the following values: \* Exact: Matches the URL path exactly. \* Prefix: Matches based on a URL path prefix split by '/'. Matching is
done on a path element by element basis. A path element refers to the
list of labels in the path split by the '/' separator. A request is a
match for path p if every element of p is an element-wise prefix of the
request path. Note that if the last element of the path is a substring
of the last element in request path, it is not a match (e.g. /foo/bar
matches /foo/bar/baz, but does not match /foo/barbaz).|str|Undefined|optional| -|**pathType**|str|Undefined|**required**| -|**backend**|[IngressBackend](doc_ingress_backend#schema-ingressbackend)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_http_ingress_rule_value.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_http_ingress_rule_value.md deleted file mode 100644 index b2bc837a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_http_ingress_rule_value.md +++ /dev/null @@ -1,16 +0,0 @@ -# http_ingress_rule_value - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/http_ingress_rule_value.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/http_ingress_rule_value.k) - -This is the http\_ingress\_rule\_value module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema HTTPIngressRuleValue - -HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\/\?\ -\> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '\#'. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**paths**
A collection of paths that map requests to backends.|[[v1.HTTPIngressPath](doc_http_ingress_path#schema-httpingresspath)]|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress.md deleted file mode 100644 index b61458f0..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress.md +++ /dev/null @@ -1,19 +0,0 @@ -# ingress - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/ingress.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/ingress.k) - -This is the ingress module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Ingress - -Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"networking.k8s.io/v1"|"networking.k8s.io/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Ingress"|"Ingress"|**required**| -|**metadata**
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#metadata|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**spec**
Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#spec-and-status|[IngressSpec](doc_ingress_spec#schema-ingressspec)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_backend.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_backend.md deleted file mode 100644 index 30c9dbf4..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_backend.md +++ /dev/null @@ -1,17 +0,0 @@ -# ingress_backend - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/ingress_backend.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/ingress_backend.k) - -This is the ingress\_backend module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema IngressBackend - -IngressBackend describes all endpoints for a given service and port. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**resource**
Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with "Service".|[v1.TypedLocalObjectReference](../../core/v1/doc_typed_local_object_reference#schema-typedlocalobjectreference)|Undefined|optional| -|**service**
Service references a Service as a Backend. This is a mutually exclusive setting with "Resource".|[IngressServiceBackend](doc_ingress_service_backend#schema-ingressservicebackend)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_rule.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_rule.md deleted file mode 100644 index 6b226e85..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_rule.md +++ /dev/null @@ -1,17 +0,0 @@ -# ingress_rule - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/ingress_rule.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/ingress_rule.k) - -This is the ingress\_rule module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema IngressRule - -IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**host**
Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the "host" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
the IP in the Spec of the parent Ingress.|str|Undefined|optional| -|**http**|[HTTPIngressRuleValue](doc_http_ingress_rule_value#schema-httpingressrulevalue)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_service_backend.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_service_backend.md deleted file mode 100644 index b2a81459..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_service_backend.md +++ /dev/null @@ -1,17 +0,0 @@ -# ingress_service_backend - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/ingress_service_backend.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/ingress_service_backend.k) - -This is the ingress\_service\_backend module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema IngressServiceBackend - -IngressServiceBackend references a Kubernetes Service as a Backend. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Name is the referenced service. The service must exist in the same namespace as the Ingress object.|str|Undefined|**required**| -|**port**
Port of the referenced service. A port name or port number is required for a IngressServiceBackend.|[ServiceBackendPort](doc_service_backend_port#schema-servicebackendport)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_spec.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_spec.md deleted file mode 100644 index f4c11b5a..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_spec.md +++ /dev/null @@ -1,19 +0,0 @@ -# ingress_spec - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/ingress_spec.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/ingress_spec.k) - -This is the ingress\_spec module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema IngressSpec - -IngressSpec describes the Ingress the user wishes to exist. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**ingressClassName**
IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.|str|Undefined|optional| -|**rules**
A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.|[[v1.IngressRule](doc_ingress_rule#schema-ingressrule)]|Undefined|optional| -|**tls**
TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.|[[v1.IngressTLS](doc_ingress_tls#schema-ingresstls)]|Undefined|optional| -|**defaultBackend**
DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.|[IngressBackend](doc_ingress_backend#schema-ingressbackend)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_tls.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_tls.md deleted file mode 100644 index 6106717e..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_ingress_tls.md +++ /dev/null @@ -1,17 +0,0 @@ -# ingress_tls - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/ingress_tls.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/ingress_tls.k) - -This is the ingress\_tls module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema IngressTLS - -IngressTLS describes the transport layer security associated with an Ingress. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**hosts**
Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.|[str]|Undefined|optional| -|**secretName**
SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the "Host" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_service_backend_port.md b/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_service_backend_port.md deleted file mode 100644 index b32177d0..00000000 --- a/docs/reference/model/kusion_kubernetes/api/networking/v1/doc_service_backend_port.md +++ /dev/null @@ -1,17 +0,0 @@ -# service_backend_port - -Source: [base/pkg/kusion_kubernetes/api/networking/v1/service_backend_port.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/networking/v1/service_backend_port.k) - -This is the service\_backend\_port module in kusion\_kubernetes.api.networking.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ServiceBackendPort - -ServiceBackendPort is the service port being referenced. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Name is the name of the port on the Service. This is a mutually exclusive setting with "Number".|str|Undefined|optional| -|**number**
Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with "Name".|int|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_aggregation_rule.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_aggregation_rule.md deleted file mode 100644 index 192d66c2..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_aggregation_rule.md +++ /dev/null @@ -1,16 +0,0 @@ -# aggregation_rule - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/aggregation_rule.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/aggregation_rule.k) - -This is the aggregation\_rule module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema AggregationRule - -AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**clusterRoleSelectors**
ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added|[[apis.LabelSelector](../../../apimachinery/apis/doc_label_selector#schema-labelselector)]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_cluster_role.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_cluster_role.md deleted file mode 100644 index bc44a0a4..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_cluster_role.md +++ /dev/null @@ -1,20 +0,0 @@ -# cluster_role - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/cluster_role.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/cluster_role.k) - -This is the cluster\_role module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ClusterRole - -ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"rbac.authorization.k8s.io/v1"|"rbac.authorization.k8s.io/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"ClusterRole"|"ClusterRole"|**required**| -|**rules**
Rules holds all the PolicyRules for this ClusterRole|[[v1.PolicyRule](doc_policy_rule#schema-policyrule)]|Undefined|optional| -|**aggregationRule**
AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.|[AggregationRule](doc_aggregation_rule#schema-aggregationrule)|Undefined|optional| -|**metadata**
Standard object's metadata.|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_cluster_role_binding.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_cluster_role_binding.md deleted file mode 100644 index 038f9353..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_cluster_role_binding.md +++ /dev/null @@ -1,20 +0,0 @@ -# cluster_role_binding - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/cluster_role_binding.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/cluster_role_binding.k) - -This is the cluster\_role\_binding module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ClusterRoleBinding - -ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"rbac.authorization.k8s.io/v1"|"rbac.authorization.k8s.io/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"ClusterRoleBinding"|"ClusterRoleBinding"|**required**| -|**subjects**
Subjects holds references to the objects the role applies to.|[[v1.Subject](doc_subject#schema-subject)]|Undefined|optional| -|**metadata**
Standard object's metadata.|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**roleRef**
RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.|[RoleRef](doc_role_ref#schema-roleref)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_policy_rule.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_policy_rule.md deleted file mode 100644 index dfe69d4c..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_policy_rule.md +++ /dev/null @@ -1,20 +0,0 @@ -# policy_rule - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/policy_rule.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/policy_rule.k) - -This is the policy\_rule module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema PolicyRule - -PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiGroups**
APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.|[str]|Undefined|optional| -|**nonResourceURLs**
NonResourceURLs is a set of partial urls that a user should have access to. \*s are allowed, but only as the full, final step in the path. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.|[str]|Undefined|optional| -|**resourceNames**
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.|[str]|Undefined|optional| -|**resources**
Resources is a list of resources this rule applies to. '\*' represents all resources.|[str]|Undefined|optional| -|**verbs**
Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '\*' represents all verbs.|[str]|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role.md deleted file mode 100644 index 8f4614d5..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role.md +++ /dev/null @@ -1,19 +0,0 @@ -# role - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/role.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/role.k) - -This is the role module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Role - -Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"rbac.authorization.k8s.io/v1"|"rbac.authorization.k8s.io/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"Role"|"Role"|**required**| -|**rules**
Rules holds all the PolicyRules for this Role|[[v1.PolicyRule](doc_policy_rule#schema-policyrule)]|Undefined|optional| -|**metadata**
Standard object's metadata.|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role_binding.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role_binding.md deleted file mode 100644 index 2a487e59..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role_binding.md +++ /dev/null @@ -1,20 +0,0 @@ -# role_binding - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/role_binding.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/role_binding.k) - -This is the role\_binding module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema RoleBinding - -RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#resources|"rbac.authorization.k8s.io/v1"|"rbac.authorization.k8s.io/v1"|**required**| -|**kind**
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|"RoleBinding"|"RoleBinding"|**required**| -|**subjects**
Subjects holds references to the objects the role applies to.|[[v1.Subject](doc_subject#schema-subject)]|Undefined|optional| -|**metadata**
Standard object's metadata.|[apis.ObjectMeta](../../../apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**roleRef**
RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.|[RoleRef](doc_role_ref#schema-roleref)|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role_ref.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role_ref.md deleted file mode 100644 index 2973d6ef..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_role_ref.md +++ /dev/null @@ -1,18 +0,0 @@ -# role_ref - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/role_ref.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/role_ref.k) - -This is the role\_ref module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema RoleRef - -RoleRef contains information that points to the role being used - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiGroup**
APIGroup is the group for the resource being referenced|str|Undefined|**required**| -|**kind**
Kind is the type of resource being referenced|str|Undefined|**required**| -|**name**
Name is the name of resource being referenced|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_subject.md b/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_subject.md deleted file mode 100644 index ca7ac448..00000000 --- a/docs/reference/model/kusion_kubernetes/api/rbac/v1/doc_subject.md +++ /dev/null @@ -1,19 +0,0 @@ -# subject - -Source: [base/pkg/kusion_kubernetes/api/rbac/v1/subject.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/rbac/v1/subject.k) - -This is the subject module in kusion\_kubernetes.api.rbac.v1 package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema Subject - -Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiGroup**
APIGroup holds the API group of the referenced subject. Defaults to "" for ServiceAccount subjects. Defaults to "rbac.authorization.k8s.io" for User and Group subjects.|str|Undefined|optional| -|**kind**
Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". If the Authorizer does not recognize the kind value, the Authorizer should report an error.|str|Undefined|**required**| -|**name**
Name of the object being referenced.|str|Undefined|**required**| -|**namespace**
Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty the Authorizer should report an error.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_label_selector.md b/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_label_selector.md deleted file mode 100644 index 0cb9fd61..00000000 --- a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_label_selector.md +++ /dev/null @@ -1,17 +0,0 @@ -# label_selector - -Source: [base/pkg/kusion_kubernetes/apimachinery/apis/label_selector.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/apimachinery/apis/label_selector.k) - -This is the label\_selector module in kusion\_kubernetes.apimachinery.apis package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema LabelSelector - -A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**matchExpressions**
matchExpressions is a list of label selector requirements. The requirements are ANDed.|[[apis.LabelSelectorRequirement](doc_label_selector_requirement#schema-labelselectorrequirement)]|Undefined|optional| -|**matchLabels**
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.|{str: str}|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_label_selector_requirement.md b/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_label_selector_requirement.md deleted file mode 100644 index ba0f29d1..00000000 --- a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_label_selector_requirement.md +++ /dev/null @@ -1,18 +0,0 @@ -# label_selector_requirement - -Source: [base/pkg/kusion_kubernetes/apimachinery/apis/label_selector_requirement.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/apimachinery/apis/label_selector_requirement.k) - -This is the label\_selector\_requirement module in kusion\_kubernetes.apimachinery.apis package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema LabelSelectorRequirement - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**key**
key is the label key that the selector applies to.|str|Undefined|**required**| -|**operator**
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.|str|Undefined|**required**| -|**values**
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.|[str]|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_managed_fields_entry.md b/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_managed_fields_entry.md deleted file mode 100644 index 45bfb2c8..00000000 --- a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_managed_fields_entry.md +++ /dev/null @@ -1,22 +0,0 @@ -# managed_fields_entry - -Source: [base/pkg/kusion_kubernetes/apimachinery/apis/managed_fields_entry.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/apimachinery/apis/managed_fields_entry.k) - -This is the managed\_fields\_entry module in kusion\_kubernetes.apimachinery.apis package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ManagedFieldsEntry - -ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
APIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.|str|Undefined|optional| -|**fieldsType**
FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"|str|Undefined|optional| -|**fieldsV1**
FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.|any|Undefined|optional| -|**manager**
Manager is an identifier of the workflow managing these fields.|str|Undefined|optional| -|**operation**
Operation is the type of operation which led to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.|str|Undefined|optional| -|**subresource**
Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.|str|Undefined|optional| -|**time**
Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_object_meta.md b/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_object_meta.md deleted file mode 100644 index 796257dc..00000000 --- a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_object_meta.md +++ /dev/null @@ -1,31 +0,0 @@ -# object_meta - -Source: [base/pkg/kusion_kubernetes/apimachinery/apis/object_meta.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/apimachinery/apis/object_meta.k) - -This is the object\_meta module in kusion\_kubernetes.apimachinery.apis package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema ObjectMeta - -ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**annotations**
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations
clusterName : str, default is Undefined, optional
The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
creationTimestamp : str, default is Undefined, optional
CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.|{str: str}|Undefined|optional| -|**clusterName**|str|Undefined|optional| -|**creationTimestamp**|str|Undefined|optional| -|**deletionGracePeriodSeconds**|int|Undefined|optional| -|**deletionTimestamp**|str|Undefined|optional| -|**finalizers**|[str]|Undefined|optional| -|**generateName**|str|Undefined|optional| -|**generation**|int|Undefined|optional| -|**labels**|{str: str}|Undefined|optional| -|**managedFields**|[[apis.ManagedFieldsEntry](doc_managed_fields_entry#schema-managedfieldsentry)]|Undefined|optional| -|**name**|str|Undefined|optional| -|**namespace**|str|Undefined|optional| -|**ownerReferences**|[[apis.OwnerReference](doc_owner_reference#schema-ownerreference)]|Undefined|optional| -|**resourceVersion**|str|Undefined|optional| -|**selfLink**|str|Undefined|optional| -|**uid**|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_owner_reference.md b/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_owner_reference.md deleted file mode 100644 index f796c3cc..00000000 --- a/docs/reference/model/kusion_kubernetes/apimachinery/apis/doc_owner_reference.md +++ /dev/null @@ -1,21 +0,0 @@ -# owner_reference - -Source: [base/pkg/kusion_kubernetes/apimachinery/apis/owner_reference.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/apimachinery/apis/owner_reference.k) - -This is the owner\_reference module in kusion\_kubernetes.apimachinery.apis package.
This file was generated by the KCL auto-gen tool. DO NOT EDIT.
Editing this file might prove futile when you re-run the KCL auto-gen generate command. - -## Schema OwnerReference - -OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
API version of the referent.|str|Undefined|**required**| -|**blockOwnerDeletion**
If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.|bool|Undefined|optional| -|**controller**
If true, this reference points to the managing controller.|bool|Undefined|optional| -|**kind**
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md\#types-kinds|str|Undefined|**required**| -|**name**
Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers\#names|str|Undefined|**required**| -|**uid**
UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers\#uids|str|Undefined|**required**| - diff --git a/docs/reference/model/kusion_models/kube/frontend/common/doc_metadata.md b/docs/reference/model/kusion_models/kube/frontend/common/doc_metadata.md deleted file mode 100644 index dcacb964..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/common/doc_metadata.md +++ /dev/null @@ -1,17 +0,0 @@ -# metadata - -Source: [base/pkg/kusion_models/kube/frontend/common/metadata.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/common/metadata.k) - -## Schema Metadata - -Metadata is the base schema of all models, which contains data
that helps uniquely identify the object. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
The name of the resource.
Name must be unique within a namespace. It's required when creating
resources, although some resources may allow a client to request the
generation of an appropriate name automatically.
Name is primarily intended for creation idempotence and configuration
definition. Cannot be updated. More info:
http://kubernetes.io/docs/user-guide/identifiers\#names|str|Undefined|optional| -|**labels**
Labels is a map of string keys and values that can be used to
organize and categorize (scope and select) objects.
May match selectors of replication controllers and services.
More info: http://kubernetes.io/docs/user-guide/labels|{str: str}|Undefined|optional| -|**annotations**
Annotations is an unstructured key value map stored with a
resource that may be set by external tools to store and retrieve
arbitrary metadata. They are not queryable and should be preserved
when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations|{str: str}|Undefined|optional| -|**namespace**
Namespaces are intended for use in environments with many users spread
across multiple teams, or projects.
For clusters with a few to tens of users, you should not need to create
or think about namespaces at all. Start using namespaces when you need the features they provide.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_models/kube/frontend/configmap/doc_configmap.md b/docs/reference/model/kusion_models/kube/frontend/configmap/doc_configmap.md deleted file mode 100644 index 78130d80..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/configmap/doc_configmap.md +++ /dev/null @@ -1,30 +0,0 @@ -# configmap - -Source: [base/pkg/kusion_models/kube/frontend/configmap/configmap.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/configmap/configmap.k) - -## Schema ConfigMap - -ConfigMap holds configuration data for pods to consume.
More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1/\#ConfigMap - -### Base Schema -[@base.pkg.kusion_models.kube.frontend.common.Metadata](../common/doc_metadata#schema-metadata) - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**data**
Data contains the configuration data.|{str: str}|Undefined|optional| -|**binaryData**
BinaryData contains the binary data.|{str: str}|Undefined|optional| -### Examples -```python -configmap = ConfigMap { - name = "my-configmap" - namespace = "my-configmap-namespace" - data = { - foo = "bar" - bar = "foo" - } -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/container/doc_container.md b/docs/reference/model/kusion_models/kube/frontend/container/doc_container.md deleted file mode 100644 index 84303f3c..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/container/doc_container.md +++ /dev/null @@ -1,42 +0,0 @@ -# container - -Source: [base/pkg/kusion_models/kube/frontend/container/container.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/container/container.k) - -## Schema Main - -Main describes the main container configuration that is expected to be run on the host. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
A Container-level attribute.
The container name. Each container in a pod must have a unique name.|str|"main"|**required**| -|**command**
A Container-level attribute.
The startup command of main process. The image's entrypoint is used if this is not provided.|[str]|Undefined|optional| -|**args**
A Container-level attribute.
The startup arguments of main process. The image's cmd is used if this is not provided.|[str]|Undefined|optional| -|**useBuiltInEnv**
useBuiltInEnv indicates whether to use built-in envs or not.|bool|False|optional| -|**env**
A Container-level attribute.
List of environment variables in the container.|[[env.Env](env/doc_env#schema-env)]|Undefined|optional| -|**envFrom**
A Container-level attribute.
List of sources to populate environment variables in the container.|[[env.EnvFromSource](env/doc_env#schema-envfromsource)]|Undefined|optional| -|**ports**
A Container-level attribute.
List of network ports in the container.|[[port.ContainerPort](port/doc_container_port#schema-containerport)]|Undefined|optional| -|**livenessProbe**
A Container-level attribute.
The probe to check whether the container is live or not.|[p.Probe](probe/doc_probe#schema-probe)|Undefined|optional| -|**readinessProbe**
A Container-level attribute.
The probe to check whether the container is ready or not.
For the default value, refer to the presupposed template: base/pkg/kusion\_models/templates/sofa\_probe.k|[p.Probe](probe/doc_probe#schema-probe)|Undefined|optional| -|**startupProbe**
A Container-level attribute.
The probe to indicate that the Pod has successfully initialized.|[p.Probe](probe/doc_probe#schema-probe)|Undefined|optional| -|**lifecycle**|[lc.Lifecycle](lifecycle/doc_lifecycle#schema-lifecycle)|Undefined|optional| -|**workingDir**|str|Undefined|optional| -|**securityContext**|{str: any}|Undefined|optional| -### Examples -```python -import base.pkg.kusion_models.kube.frontend.container -import base.pkg.kusion_models.kube.frontend.container.probe as p - -main = container.Main { - name = "test" - livenessProbe = p.Probe { - handler = p.Http { - path = "/healthz" - } - initialDelaySeconds = 10 - } -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/container/env/doc_env.md b/docs/reference/model/kusion_models/kube/frontend/container/env/doc_env.md deleted file mode 100644 index 979ecb8b..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/container/env/doc_env.md +++ /dev/null @@ -1,57 +0,0 @@ -# env - -Source: [base/pkg/kusion_models/kube/frontend/container/env/env.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/container/env/env.k) - -## Schema Env - -Env represents an environment variable present in a Container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
A Container-level attribute.
The env name. This must be a C\_IDENTIFIER.|str|Undefined|**required**| -|**value**
A Container-level attribute.
The simple literal value.|str|Undefined|optional| -|**valueFrom**
A Container-level attribute.
The ref source of this env.|[EnvValueFrom](#schema-envvaluefrom)|Undefined|optional| -## Schema EnvValueFrom - -EnvValueFrom represents the source of the value of an Env. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**fieldRef**
A Container-level attribute.
Selects a key of a field.|[ObjectFieldSelector](#schema-objectfieldselector)|Undefined|optional| -|**configMapKeyRef**
A Container-level attribute.
Selects a key of a ConfigMap.|[ObjectKeySelector](#schema-objectkeyselector)|Undefined|optional| -|**secretKeyRef**
A Container-level attribute.
Selects a key of a secret.|[ObjectKeySelector](#schema-objectkeyselector)|Undefined|optional| -## Schema ObjectKeySelector - -ObjectKeySelector contains enough information to let you locate the referenced object. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
A Container-level attribute.
The name of object, typically a ConfigMap or Secret name.|str|Undefined|**required**| -|**key**
A Container-level attribute.
The key of the object to select from.|str|Undefined|**required**| -## Schema ObjectFieldSelector - -ObjectFieldSelector contains enough information to let you select field of an object. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**apiVersion**
A Container-level attribute.
Version of the schema the FieldPath is written in terms of.|str|Undefined|**required**| -|**fieldPath**
A Container-level attribute.
Path of the field to select of an object.|str|Undefined|**required**| -## Schema EnvFromSource - -EnvFromSource represents the source of a set of ConfigMaps or Secrets. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**configMapRef**
A Container-level attribute.
The ConfigMap name to select from.|str|Undefined|optional| -|**secretRef**
A Container-level attribute.
The Secret name to select from.|str|Undefined|optional| - diff --git a/docs/reference/model/kusion_models/kube/frontend/container/lifecycle/doc_lifecycle.md b/docs/reference/model/kusion_models/kube/frontend/container/lifecycle/doc_lifecycle.md deleted file mode 100644 index 62f5eea4..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/container/lifecycle/doc_lifecycle.md +++ /dev/null @@ -1,34 +0,0 @@ -# lifecycle - -Source: [base/pkg/kusion_models/kube/frontend/container/lifecycle/lifecycle.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/container/lifecycle/lifecycle.k) - -## Schema Lifecycle - -Lifecycle describes actions that the management system should take in response
to container lifecycle events. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**preStop**
A Container-level attribute.
The PreStop action is called immediately before a container is terminated.|[probe.Exec](../probe/doc_probe#schema-exec) \| [probe.Http](../probe/doc_probe#schema-http)|Undefined|optional| -|**postStart**
A Container-level attribute.
The PostStart action is called immediately after a container is created.|[probe.Exec](../probe/doc_probe#schema-exec) \| [probe.Http](../probe/doc_probe#schema-http)|Undefined|optional| -### Examples -```python -import base.pkg.kusion_models.kube.frontend.container.lifecycle as lc -import base.pkg.kusion_models.kube.frontend.container.probe as p - -p = lc.Lifecycle { - preStop = p.Exec { - command = [ - "timeout" - "--signal=9" - "1800s" - "sh" - "-c" - "bash -x /tmp/image-builder/boot/boot.sh" - ] - } -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/container/port/doc_container_port.md b/docs/reference/model/kusion_models/kube/frontend/container/port/doc_container_port.md deleted file mode 100644 index 36d7217e..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/container/port/doc_container_port.md +++ /dev/null @@ -1,25 +0,0 @@ -# container_port - -Source: [base/pkg/kusion_models/kube/frontend/container/port/container_port.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/container/port/container_port.k) - -## Schema ContainerPort - -ContainerPort represents a network port in a single container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
If specified, this must be an IANA\_SVC\_NAME and unique within the pod.
Each named port in a pod must have a unique name.
Name for the port that can be referred to by services.|str|Undefined|optional| -|**protocol**
A Container-level attribute.
The protocol for port. Must be UDP, TCP or SCTP. Default is TCP.|"TCP" \| "UDP" \| "SCTP"|"TCP"|**required**| -|**containerPort**
A Container-level attribute.
The number of port to expose on the container's IP address.|int|Undefined|**required**| -### Examples -```python -p = ContainerPort { - name = "test" - protocol = "TCP" - containerPort = 8080 -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/container/probe/doc_probe.md b/docs/reference/model/kusion_models/kube/frontend/container/probe/doc_probe.md deleted file mode 100644 index ffbc515d..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/container/probe/doc_probe.md +++ /dev/null @@ -1,60 +0,0 @@ -# probe - -Source: [base/pkg/kusion_models/kube/frontend/container/probe/probe.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/container/probe/probe.k) - -## Schema Probe - -Probe describes a health check to be performed against a container
to determine whether it is alive or ready to receive traffic. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**handler**
A Container-level attribute.
The action taken to determine the health of a container.|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http) \| [probe.Tcp](#schema-tcp)|Undefined|**required**| -|**initialDelaySeconds**
A Container-level attribute.
The length of time before health checking is activated. In seconds.|int|Undefined|optional| -|**timeoutSeconds**
A Container-level attribute.
The length of time before health checking times out. In seconds.|int|Undefined|optional| -|**periodSeconds**
A Container-level attribute.
How often (in seconds) to perform the probe.|int|10|optional| -|**successThreshold**
A Container-level attribute.
Minimum consecutive successes for the probe to be considered successful after having failed.|int|Undefined|optional| -|**failureThreshold**
A Container-level attribute.
Minimum consecutive failures for the probe to be considered failed after having succeeded.|int|Undefined|optional| -### Examples -```python -import base.pkg.kusion_models.kube.frontend.container.probe as p - -probe = p.Probe { - handler = p.Http { - path = "/healthz" - } - initialDelaySeconds = 10 -} -``` - -## Schema Exec - -Exec describes a "run in container" action. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**command**
A Container-level attribute.
The command line to execute inside the container.|[str]|Undefined|**required**| -## Schema Http - -Http describes an action based on HTTP Get requests. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**path**
A Container-level attribute.
The path to access on the HTTP server, e.g. /healthz|str|Undefined|**required**| -|**port**
A Container-level attribute.
The Number of the port to access on the container.|int|Undefined|**required**| -|**scheme**
A Container-level attribute.
Scheme to use for connecting to the host, defaults to HTTP.|"HTTP" \| "HTTPS"|"HTTP"|**required**| -## Schema Tcp - -Tcp describes an action based on opening a socket. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**tcpSocket**
A Container-level attribute.
The TCP socket port to connect to.|int|Undefined|**required**| - diff --git a/docs/reference/model/kusion_models/kube/frontend/doc_server.md b/docs/reference/model/kusion_models/kube/frontend/doc_server.md deleted file mode 100644 index f370c53f..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/doc_server.md +++ /dev/null @@ -1,61 +0,0 @@ -# server - -Source: [base/pkg/kusion_models/kube/frontend/server.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/server.k) - -## Schema Server - -Server is an abstraction of Deployment and StatefulSet. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**workloadType**
Application workload type, defaults to 'Deployment'|"Deployment" \| "StatefulSet"|"Deployment"|**required**| -|**replicas**
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.|int|1|**required**| -|**image**
Docker image name.
More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| -|**schedulingStrategy**
SchedulingStrategy represents scheduling strategy.|[strategy.SchedulingStrategy](strategy/doc_scheduling_strategy#schema-schedulingstrategy)|Undefined|**required**| -|**mainContainer**
MainContainer describes the main container configuration that is expected to be run on the host.|[container.Main](container/doc_container#schema-main)|Undefined|**required**| -|**sidecarContainers**
SidecarContainers describes the list of sidecar container configurations that are expected to be run on the host.|[[sidecar.Sidecar](sidecar/doc_sidecar#schema-sidecar)]|Undefined|optional| -|**initContainers**
InitContainers describes the list of init container configurations that are expected to be run on the host.|[[sidecar.Sidecar](sidecar/doc_sidecar#schema-sidecar)]|Undefined|optional| -|**useBuiltInLabels**
UseBuiltInLabels indicates whether to use built-in labels or not.|bool|False|optional| -|**labels**
Labels is a map of string keys and values that can be used to organize and categorize (scope and select) objects.
More info: http://kubernetes.io/docs/user-guide/labels|{str: str}|Undefined|optional| -|**annotations**
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata.
More info: http://kubernetes.io/docs/user-guide/annotations|{str: str}|Undefined|optional| -|**useBuiltInSelector**
UseBuiltInSelector indicates whether to use the built-in selector or not.|bool|False|optional| -|**selector**
Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.|{str: str}|Undefined|optional| -|**podMetadata**
PodMetadata is metadata that all persisted resources must have, which includes all objects users must create.|[apis.ObjectMeta](../../../kusion_kubernetes/apimachinery/apis/doc_object_meta#schema-objectmeta)|Undefined|optional| -|**volumes**
Volumes represents a list of named volumes and their corresponding mounts in containers.|[[volume.Volume](volume/doc_volume#schema-volume)]|Undefined|optional| -|**needNamespace**
NeedNamespace marks whether the server is namespace-scoped or not.|bool|True|optional| -|**enableMonitoring**
EnableMonitoring marks whether monitoring is enabled for the server or not.|bool|False|optional| -|**configMaps**
ConfigMaps is a list of ConfigMap which holds configuration data for the server to consume.|[[configmap.ConfigMap](configmap/doc_configmap#schema-configmap)]|Undefined|optional| -|**secrets**
Secrets is a list of Secret which holds secret data of a certain type.|[[secret.Secret](secret/doc_secret#schema-secret)]|Undefined|optional| -|**services**
Services is a list of Service which exposes an application running on a set of Pods as a network service.|[[service.Service](service/doc_service#schema-service)]|Undefined|optional| -|**ingresses**
Ingresses is a list of Ingress which is collection of rules that allow inbound connections to reach the endpoints defined by a backend.|[[ingress.Ingress](ingress/doc_ingress#schema-ingress)]|Undefined|optional| -|**serviceAccount**
ServiceAccount is used to run this pod.|[sa.ServiceAccount](serviceaccount/doc_service_account#schema-serviceaccount)|Undefined|optional| -### Examples -```python -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.container -import base.pkg.kusion_models.kube.templates.resource as res_tpl - -appConfiguration: frontend.Server { - mainContainer = container.Main { - name = "php-redis" - env = [ - { - name = "GET_HOSTS_FROM" - value = "dns" - } - ] - ports = [{containerPort = 80}] - } - selector = { - tier = "frontend" - } - podMetadata.labels: { - tier = "frontend" - } - schedulingStrategy.resource = res_tpl.tiny -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/ingress/doc_ingress.md b/docs/reference/model/kusion_models/kube/frontend/ingress/doc_ingress.md deleted file mode 100644 index 46075e47..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/ingress/doc_ingress.md +++ /dev/null @@ -1,38 +0,0 @@ -# ingress - -Source: [base/pkg/kusion_models/kube/frontend/ingress/ingress.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/ingress/ingress.k) - -## Schema Ingress - -Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend.
An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. - -### Base Schema -[@base.pkg.kusion_models.kube.frontend.common.Metadata](../common/doc_metadata#schema-metadata) - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**rules**
A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.|[[v1.IngressRule](../../../../kusion_kubernetes/api/networking/v1/doc_ingress_rule#schema-ingressrule)]|Undefined|optional| -### Examples -```python -ingress.Ingress { - name = "example-ingress" - rules = [ - { - http.paths = [ - { - path = "/apple" - pathType = "Prefix" - backend.service: { - name = "app-service" - port.number = 5678 - } - } - ] - } - ] -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource.md b/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource.md deleted file mode 100644 index f960a171..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource.md +++ /dev/null @@ -1,27 +0,0 @@ -# resource - -Source: [base/pkg/kusion_models/kube/frontend/resource/resource.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/resource/resource.k) - -## Schema Resource - -Resource describes the compute resource requirements. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**cpu**
A Container-level attribute.
CPU, in cores, default 1 core. (500m = .5 cores)|int \| number_multiplier|1|optional| -|**memory**
A Container-level attribute.
Memory, in bytes, default 1024Mi. (500Gi = 500GiB = 500 \* 1024 \* 1024 \* 1024)|number\_multiplier|1024Mi|optional| -|**disk**
A Container-level attribute.
Local disk storage, in bytes, default 10Gi. (500Gi = 500GiB = 500 \* 1024 \* 1024 \* 1024)|number\_multiplier|10Gi|optional| -### Examples -```python -import base.pkg.kusion_models.kube.frontend.resource as res - -res = res.Resource { - cpu = 2 - memory = 2048Mi - disk = 20Gi -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource_requirements.md b/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource_requirements.md deleted file mode 100644 index 8746a2f1..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource_requirements.md +++ /dev/null @@ -1,33 +0,0 @@ -# resource_requirements - -Source: [base/pkg/kusion_models/kube/frontend/resource/resource_requirements.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/resource/resource_requirements.k) - -## Schema ResourceRequirements - -ResourceRequirements describes the compute resource requirements.. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**limits**
A Container-level attribute.
Limits describes the maximum amount of compute resources allowed.
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/|[Resource](doc_resource#schema-resource)|Undefined|**required**| -|**requests**
A Container-level attribute.
Requests describes the minimum amount of compute resources required.
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value.
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/|[Resource](doc_resource#schema-resource)|Undefined|**required**| -### Examples -```python -import base.pkg.kusion_models.kube.frontend.resource as res - -res = res.ResourceRequirements { - limits = { - cpu = 1 - memory = 1Gi - disk = 20Gi - } - requests = { - cpu = 500m - memory = 512Mi - disk = 10Gi - } -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/secret/doc_secret.md b/docs/reference/model/kusion_models/kube/frontend/secret/doc_secret.md deleted file mode 100644 index e7845d48..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/secret/doc_secret.md +++ /dev/null @@ -1,32 +0,0 @@ -# secret - -Source: [base/pkg/kusion_models/kube/frontend/secret/secret.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/secret/secret.k) - -## Schema Secret - -Secret holds secret data of a certain type.
The total bytes of the values in the Data field
must be less than MaxSecretSize bytes. - -### Base Schema -[@base.pkg.kusion_models.kube.frontend.common.Metadata](../common/doc_metadata#schema-metadata) - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**data**
Data contains the secret data. Each key must consist of alphanumeric characters, '-', '\_' or '.'.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#restriction-names-data|{str: str}|Undefined|optional| -|**stringData**
stringData allows specifying non-binary secret data in string form.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#restriction-names-data|{str: str}|Undefined|optional| -|**type**
Used to facilitate programmatic handling of secret data.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#secret-types|str|Undefined|optional| -### Examples -```python -secret = Secret { - name = "my-secret" - namespace = "my-secret-namespace" - data = { - foo = bar - bar = foo - } - $type = "kubernetes.io/service-account-token" -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/service/doc_service.md b/docs/reference/model/kusion_models/kube/frontend/service/doc_service.md deleted file mode 100644 index b7612d20..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/service/doc_service.md +++ /dev/null @@ -1,49 +0,0 @@ -# service - -Source: [base/pkg/kusion_models/kube/frontend/service/service.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/service/service.k) - -## Schema Service - -Service are Kubernetes objects which partition a single Kubernetes cluster into multiple virtual clusters.
More info: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/\#Service - -### Base Schema -[@base.pkg.kusion_models.kube.frontend.common.Metadata](../common/doc_metadata#schema-metadata) - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**selector**
Route service traffic to pods with label keys and values matching this selector.
More info: https://kubernetes.io/docs/concepts/services-networking/service/|{str: str}|Undefined|optional| -|**ports**
The list of ports that are exposed by this service.
More info: https://kubernetes.io/docs/concepts/services-networking/service/\#virtual-ips-and-service-proxies|[[v1.ServicePort](../../../../kusion_kubernetes/api/core/v1/doc_service_port#schema-serviceport)]|Undefined|optional| -|**clusterIP**
clusterIP is the IP address of the service and is usually assigned randomly by the master.
More info: https://kubernetes.io/docs/concepts/services-networking/service/\#virtual-ips-and-service-proxies|str|None|optional| -|**type**
determines how the Service is exposed.
More info: https://kubernetes.io/docs/concepts/services-networking/service/\#publishing-services-service-types|str|"ClusterIP"|optional| -|**externalIPs**
externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service.|[str]|Undefined|optional| -|**externalName**
externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record).|str|Undefined|optional| -|**externalTrafficPolicy**
externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints.|str|Undefined|optional| -|**healthCheckNodePort**
healthCheckNodePort specifies the healthcheck nodePort for the service.|int|Undefined|optional| -|**ipFamilyPolicy**
ipFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the "IPv6DualStack" feature gate.|str|Undefined|optional| -|**loadBalancerIP**
Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field.|str|Undefined|optional| -|**loadBalancerSourceRanges**
If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs.
This field will be ignored if the cloud-provider does not support the feature.
More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/|[str]|Undefined|optional| -|**publishNotReadyAddresses**
publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready.|bool|Undefined|optional| -|**sessionAffinity**
Supports "ClientIP" and "None". Used to maintain session affinity.
More info: https://kubernetes.io/docs/concepts/services-networking/service/\#virtual-ips-and-service-proxies|str|Undefined|optional| -|**sessionAffinityConfig**
sessionAffinityConfig contains the configurations of session affinity.|{str: any}|Undefined|optional| -### Examples -```python -service = Service { - name = "my-service-name" - namespace = "my-service-name" - labels.env = "dev" - ports = [ - { - name = "grpc-xds" - port = 15010 - } - { - name = "https-xds" - port = 15011 - } - ] -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/serviceaccount/doc_service_account.md b/docs/reference/model/kusion_models/kube/frontend/serviceaccount/doc_service_account.md deleted file mode 100644 index ea85b846..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/serviceaccount/doc_service_account.md +++ /dev/null @@ -1,39 +0,0 @@ -# service_account - -Source: [base/pkg/kusion_models/kube/frontend/serviceaccount/service_account.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/serviceaccount/service_account.k) - -## Schema ServiceAccount - -A service account provides an identity for processes that run in a Pod.
ServiceAccount binds together:
- a name, understood by users, and perhaps by peripheral systems, for an identity
- a principal that can be authenticated and authorized
- a set of secrets
More info: https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/service-account-v1/\#ServiceAccount - -### Base Schema -[@base.pkg.kusion_models.kube.frontend.common.Metadata](../common/doc_metadata#schema-metadata) - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**imagePullSecrets**
ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount.
More info: https://kubernetes.io/docs/concepts/containers/images/\#specifying-imagepullsecrets-on-a-pod
secrets: [{str:str}], default is Undefined, optional.
Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
More info: https://kubernetes.io/docs/concepts/configuration/secret|[{str: str}]|Undefined|optional| -|**secrets**|[{str: str}]|Undefined|optional| -### Examples -```python -my_service_account = ServiceAccount { - name: "my-service-account" - namespace = "my-service-account-namespace" - labels: { - tier: "monitoring" - } - imagePullSecrets: [ - { - name: "my-secret" - } - ] - secrets: [ - { - name: "my-secret" - } - ] -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/sidecar/doc_sidecar.md b/docs/reference/model/kusion_models/kube/frontend/sidecar/doc_sidecar.md deleted file mode 100644 index c15a712d..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/sidecar/doc_sidecar.md +++ /dev/null @@ -1,43 +0,0 @@ -# sidecar - -Source: [base/pkg/kusion_models/kube/frontend/sidecar/sidecar.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/sidecar/sidecar.k) - -## Schema Sidecar - -Sidecar describes the sidecar container configuration that is expected to be run on the host. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
A Container-level attribute.
The container name. Each container in a pod must have a unique name.|str|Undefined|**required**| -|**command**
A Container-level attribute.
The startup command of main process. The image's entrypoint is used if this is not provided.|[str]|Undefined|optional| -|**args**
A Container-level attribute.
The startup arguments of main process. The image's cmd is used if this is not provided.|[str]|Undefined|optional| -|**env**
A Container-level attribute.
List of environment variables in the container.|[[env.Env](../container/env/doc_env#schema-env)]|Undefined|optional| -|**envFrom**
A Container-level attribute.
List of sources to populate environment variables in the container.|[[env.EnvFromSource](../container/env/doc_env#schema-envfromsource)]|Undefined|optional| -|**ports**|[[port.ContainerPort](../container/port/doc_container_port#schema-containerport)]|Undefined|optional| -|**resource**
A Pod-level attribute.
Sidecar container resource. |str \| [resource.Resource](../resource/doc_resource#schema-resource)|"1|optional| -|**image**
A Container-level attribute.
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| -|**readinessProbe**
A Container-level attribute.
The probe to check whether the container is ready or not.|[p.Probe](../container/probe/doc_probe#schema-probe)|Undefined|optional| -|**livenessProbe**
A Container-level attribute.
The probe to check whether the container is live or not.|[p.Probe](../container/probe/doc_probe#schema-probe)|Undefined|optional| -|**startupProbe**
A Container-level attribute.
The probe to indicate that the Pod has successfully initialized.|[p.Probe](../container/probe/doc_probe#schema-probe)|Undefined|optional| -|**lifecycle**
Actions that the management system should take in response to container lifecycle events.
Cannot be updated.|[lc.Lifecycle](../container/lifecycle/doc_lifecycle#schema-lifecycle)|Undefined|optional| -|**workingDir**
Container's working directory. If not specified, the container runtime's default will be used,
which might be configured in the container image. Cannot be updated.|str|Undefined|optional| -|**securityContext**
SecurityContext defines the security options the container should be run with.
If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/|{str: any}|Undefined|optional| -### Examples -```python -import base.pkg.kusion_models.kube.frontend.sidecar as s -import base.pkg.kusion_models.kube.frontend.container.probe as p - -sidecar = s.Sidecar { - name = "test" - livenessProbe = p.Probe { - handler = p.Http { - httpPath = "/healthz" - } - initialDelaySeconds = 10 - } -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/sidecar/doc_simple_sidecar.md b/docs/reference/model/kusion_models/kube/frontend/sidecar/doc_simple_sidecar.md deleted file mode 100644 index ae507d36..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/sidecar/doc_simple_sidecar.md +++ /dev/null @@ -1,26 +0,0 @@ -# simple_sidecar - -Source: [base/pkg/kusion_models/kube/frontend/sidecar/simple_sidecar.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/sidecar/simple_sidecar.k) - -## Schema SimpleSidecar - -Simple sidecar describes the sidecar container configuration that is expected to be run on the host. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
The sidecar name. e.g. 'odp','kmi','antmonitor'.|str|Undefined|**required**| -|**version**
The sidecar version. e.g. 'v1.2.3'.|str|Undefined|**required**| -|**extInfo**
The extended information.|{str: any}|Undefined|optional| -### Examples -```python -import base.pkg.kusion_models.kube.frontend.sidecar as s - -sidecar = s.SimpleSidecar { - name = "test" - version = "v1.2.3" -} -``` - - diff --git a/docs/reference/model/kusion_models/kube/frontend/strategy/doc_scheduling_strategy.md b/docs/reference/model/kusion_models/kube/frontend/strategy/doc_scheduling_strategy.md deleted file mode 100644 index c3677135..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/strategy/doc_scheduling_strategy.md +++ /dev/null @@ -1,14 +0,0 @@ -# scheduling_strategy - -Source: [base/pkg/kusion_models/kube/frontend/strategy/scheduling_strategy.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/strategy/scheduling_strategy.k) - -## Schema SchedulingStrategy - -SchedulingStrategy represents scheduling strategy. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**resource**
A Pod-level attribute.
Main container resource.|str \| [resource.Resource](../resource/doc_resource#schema-resource)|Undefined|optional| - diff --git a/docs/reference/model/kusion_models/kube/frontend/volume/doc_volume.md b/docs/reference/model/kusion_models/kube/frontend/volume/doc_volume.md deleted file mode 100644 index c296bd97..00000000 --- a/docs/reference/model/kusion_models/kube/frontend/volume/doc_volume.md +++ /dev/null @@ -1,121 +0,0 @@ -# volume - -Source: [base/pkg/kusion_models/kube/frontend/volume/volume.k](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/volume/volume.k) - -## Schema Volume - -Volume represents a named volume and corresponding mounts in containers. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
Volume's name. Must be a DNS\_LABEL and unique within the pod.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/\#names|str|Undefined|**required**| -|**volumeSource**
VolumeSource represents the location and type of the mounted volume.|[volume.EmptyDir](#schema-emptydir) \| [volume.Secret](#schema-secret) \| [volume.ConfigMap](#schema-configmap) \| [volume.FlexVolume](#schema-flexvolume) \| [volume.HostPath](#schema-hostpath) \| [volume.DownwardAPI](#schema-downwardapi) \| [volume.CSI](#schema-csi)|Undefined|**required**| -|**mounts**
Volumes to mount into the container's filesystem.|[[volume.Mount](#schema-mount)]|Undefined|optional| -### Examples -```python -volume = v.Volume { - name = "kubeconfig" - volumeSource = v.Secret { - secretName = "kubeconfig" - defaultMode = 420 - } - mounts = [ - v.Mount { - path = "/etc/kubernetes/kubeconfig" - readOnly = true - } - ] -} -``` - -## Schema Mount - -Mount represents a mounting of a Volume within a container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**container**
A Pod-level attribute.
Name of container to mount, \* represents all containers.|str|*|**required**| -|**path**
A Container-level attribute.
Path within the container at which the volume should be mounted.|str|Undefined|**required**| -|**subPath**
A Container-level attribute.
Path within the volume from which the container's volume should be mounted.|str|Undefined|optional| -|**readOnly**
A Container-level attribute.
Mounted read-only if true, read-write otherwise.|bool|False|optional| -## Schema EmptyDir - -EmptyDir represents a temporary directory that shares a pod's lifetime. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**medium**
A Pod-level attribute.
What type of storage medium should back this directory.|"" \| "Memory"|""|**required**| -|**sizeLimit**
A Pod-level attribute.
Total amount of local storage required for this EmptyDir volume.|str|Undefined|optional| -## Schema Secret - -Secret represents a secret that should populate this volume. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**secretName**
A Pod-level attribute.
Name of the secret in the pod's namespace to use.|str|Undefined|**required**| -|**items**
A Pod-level attribute.
Key-value pairs projected into the volume.|[{str: str}]|Undefined|optional| -|**defaultMode**
A Pod-level attribute.
Mode bits used to set permissions on created files by default.|int|Undefined|optional| -## Schema ConfigMap - -ConfigMap represents a ConfigMap that should populate this volume. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**name**
A Pod-level attribute.
Name of the configMap in the pod's namespace to use.|str|Undefined|**required**| -|**items**
A Pod-level attribute.
Key-value pairs projected into the volume.|[{str: str}]|Undefined|optional| -|**defaultMode**
A Pod-level attribute.
Mode bits used to set permissions on created files by default.|int|Undefined|optional| -## Schema FlexVolume - -FlexVolume represents a generic volume resource that is provisioned and attached using an exec-based plugin. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**driver**
A Pod-level attribute.
Driver is the name of the driver to use for this volume.|str|Undefined|**required**| -|**fsType**
A Pod-level attribute.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs".
The default filesystem depends on FlexVolume script.|str|Undefined|optional| -|**options**
A Pod-level attribute.
Extra command options if any.|{str: str}|Undefined|optional| -|**readOnly**
A Pod-level attribute.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|False|optional| -## Schema HostPath - -HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**path**
A Pod-level attribute.
Path of the directory on the host. If the path is a symlink, it will follow the link to the real path.
More info: https://kubernetes.io/docs/concepts/storage/volumes\#hostpath|str|Undefined|**required**| -|**type**
A Pod-level attribute.
Type for HostPath Volume. Defaults to ""
More info: https://kubernetes.io/docs/concepts/storage/volumes\#hostpath|str|Undefined|optional| -## Schema DownwardAPI - -DownwardAPI represents downward API info about the pod that should populate this volume. - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**defaultMode**
A Pod-level attribute.
Mode bits used to set permissions on created files by default.|int|Undefined|optional| -|**items**
A Pod-level attribute.
Items is a list of downward API volume file|[{str: any}]|Undefined|optional| -## Schema CSI - -CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). - -### Attributes - -|Name and Description|Type|Default Value|Required| -|--------------------|----|-------------|--------| -|**driver**
A Pod-level attribute.
Driver is the name of the driver to use for this volume.|str|Undefined|optional| -|**fsType**
A Pod-level attribute.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs".
The default filesystem depends on FlexVolume script.|str|Undefined|optional| -|**readOnly**
A Pod-level attribute.
Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.|bool|False|optional| -|**volumeAttributes**
A Pod-level attribute.
Extra command options if any.|{str: str}|Undefined|optional| - diff --git a/docs/user_docs/concepts/_category_.json b/docs/user_docs/concepts/_category_.json deleted file mode 100644 index 983c10df..00000000 --- a/docs/user_docs/concepts/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Architecture & Concepts", - "position": 3 -} diff --git a/docs/user_docs/concepts/arch.md b/docs/user_docs/concepts/arch.md deleted file mode 100644 index 01c95e07..00000000 --- a/docs/user_docs/concepts/arch.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Architecture -![](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/arch.png) - - -KusionStack includes three core components: - -- [KCL](https://github.com/KusionStack/KCLVM): A constraint-based record & functional language used in configuration and policy scenarios. -- [Konfig](https://github.com/KusionStack/konfig): A mono-codespace for programmable app configuration and shared schemas. -- [Kusion](https://github.com/KusionStack/kusion): The engine to deliver intentions to clouds. - -The image above illustrates the workflow of KusionStack and how it works. In the next section, we will describe each of these components in detail. diff --git a/docs/user_docs/concepts/index.md b/docs/user_docs/concepts/index.md deleted file mode 100644 index dc1ba1cc..00000000 --- a/docs/user_docs/concepts/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Architecture & Concepts - -In this section, we will introduce the architecture of KusinStack and some core concepts. \ No newline at end of file diff --git a/docs/user_docs/concepts/kcl-lang.md b/docs/user_docs/concepts/kcl-lang.md deleted file mode 100644 index 99614d00..00000000 --- a/docs/user_docs/concepts/kcl-lang.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -sidebar_position: 2 ---- - -# KCL - -[Kusion Configuration Language (KCL)](https://github.com/KusionStack/KCLVM) is an open source constraint-based record and functional language. KCL improves the writing of a large number of complex configurations through mature programming language technology and practice, and is committed to building better modularity, scalability and stability around configuration, simpler logic writing, fast automation and good ecological extensionality. - -## Features - -![](/img/docs/user_docs/intro/kcl.png) - -+ **Easy-to-use**: Originated from high-level languages ​​such as Python and Golang, incorporating functional language features with low side effects. -+ **Well-designed**: Independent Spec-driven syntax, semantics, runtime and system modules design. -+ **Quick modeling**: [Schema](https://kusionstack.io/docs/reference/lang/lang/tour#schema)-centric configuration types and modular abstraction. -+ **Rich capabilities**: Configuration with type, logic and policy based on [Config](https://kusionstack.io/docs/reference/lang/lang/codelab/simple), [Schema](https://kusionstack.io/docs/reference/lang/lang/tour/#schema), [Lambda](https://kusionstack.io/docs/reference/lang/lang/tour/#function), [Rule](https://kusionstack.io/docs/reference/lang/lang/tour/#rule). -+ **Stability**: Configuration stability built on [static type system](https://kusionstack.io/docs/reference/lang/lang/tour/#type-system), [constraints](https://kusionstack.io/docs/reference/lang/lang/tour/#validation), and [rules](https://kusionstack.io/docs/reference/lang/lang/tour#rule). 
-+ **Scalability**: High scalability through [automatic merge mechanism](https://kusionstack.io/docs/reference/lang/lang/tour/#-operators-1) of isolated config blocks. -+ **Fast automation**: Gradient automation scheme of [CRUD APIs](https://kusionstack.io/docs/reference/lang/lang/tour/#kcl-cli-variable-override), [multilingual SDKs](https://kusionstack.io/docs/reference/lang/xlang-api/overview), [language plugin](https://github.com/KusionStack/kcl-plugin) -+ **High performance**: High compile time and runtime performance using Rust & C and [LLVM](https://llvm.org/), and support compilation to native code and [WASM](https://webassembly.org/). -+ **API affinity**: Native support API ecological specifications such as [OpenAPI](https://github.com/KusionStack/kcl-openapi), Kubernetes CRD, Kubernetes YAML spec. -+ **Development friendly**: Friendly development experiences with rich [language tools](https://kusionstack.io/docs/reference/cli/kcl/) (Format, Lint, Test, Vet, Doc, etc.) and [IDE plugins](https://github.com/KusionStack/vscode-kcl). -+ **Safety & maintainable**: Domain-oriented, no system-level functions such as native threads and IO, low noise and security risk, easy maintenance and governance. -+ **Production-ready**: Widely used in production practice of platform engineering and automation at Ant Group. - -## What is it for? - -You can use KCL to - -+ Generate low-level static configuration data like JSON, YAML, etc. -+ Reduce boilerplate in configuration data with the schema modeling. -+ Define schemas with rule constraints for configuration data and validate them automatically. -+ Organize, simplify, unify and manage large configurations without side effects. -+ Manage large configurations scalably with isolated configuration blocks. -+ Used as a platform engineering lang to deliver modern app with KusionStack. diff --git a/docs/user_docs/concepts/konfig.md b/docs/user_docs/concepts/konfig.md deleted file mode 100644 index 87aab2d6..00000000 --- a/docs/user_docs/concepts/konfig.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -sidebar_position: 3 ---- - -# Konfig - -Konfig is a monorepo that stores configurations about operation intentions mainly described by KCL. Users can write configuration (config), type (schema), function (lambda) and policy (rule) through the record and functional language KCL. Konfig provides an out-of-the-box cloud-native application model, allowing users to quickly start the journey of cloud-native delivery. Developers can write base models and application models through KCL's modular language features, which helps the platform side quickly reveal capabilities through "building blocks", and application developers can define application models as needed. - -Konfig repository is a unified programming interface for all operation scenarios and is a cooperation workspace for application developers, platform developers and SREs. - -For more details, please refer to [Model Overview](https://KusionStack.io/docs/reference/model/overview) - -## Konfig Structure - -The overall structure of the configuration library is as follows: - -```bash -. 
-├── Makefile # use Makefile to encapsulate common commands -├── README.md # configuration library instructions -├── appops # application operation and maintenance directory -│ ├── guestbook-frontend -│ ├── http-echo -│ └── nginx-example -├── base # Kusion Model repository -│ ├── examples # Kusion Model example code -│ │ ├── monitoring # monitoring configuration example -│ │ ├── k8s # Kubernetes resource configuration example -│ │ └── infra # infrastructure configuration example -│ └── pkg -│ ├── kusion_kubernetes # Kubernetes low-level model library -│ ├── kusion_models # core model library -│ ├── kusion_prometheus # Prometheus low-level model library -│ └── kusion_provider # infrastructure low-level model library -├── hack # python scripts -└── kcl.mod # core library configuration file -``` - -## Project & Stack -![](/img/docs/user_docs/concepts/project-stack.png) - -Project and Stack are logical isolation concepts used to orginize the Konfig. -### Project - -Any folder that contains the file `project.yaml` will be regarded as a Project, and the `project.yaml` is used to describe the metadata of this Project like `name` and `tenant`. Projects must have clear business semantics and must belong to a tenant. Users can map an application or an operation scenario to a Project. - -### Stack - -Like Project, any folder that contains the file `stack.yaml` will be regarded as a Stack and `stack.yaml` is used to describe the metadata of this Stack. Stack is a set of `.k` files that represents the smallest operation unit that can be configured and deployed individually. It tends to represent different stages in the CI/CD process, such as dev, gray, prod, etc. - -### Relationship between Project and Stack - -A Project contains one or more Stacks, and a Stack must belong to and can only belong to one Project. Users can interpret the meaning of Project and Stack according to their own needs and flexibly organize the Konfig structure. We provide the following example as a best practice according to our experiences: - -```bash -appops/nginx-example -├── README.md # Project readme -├── base # common configurations for all stacks -│ └── base.k -├── dev # dev stack -│ ├── ci-test # CI test configs -│ │ ├── settings.yaml # test data -│ │ └── stdout.golden.yaml # expected test result -│ ├── kcl.yaml # kcl config -│ ├── main.k -│ └── stack.yaml # Stack metadata -└── project.yaml # Project metadata -``` - -The Project represents an application and Stack represents different environments of this application, such as dev, pre and prod, etc. Common configurations can be stored in a `base` directory under this Project. - - diff --git a/docs/user_docs/concepts/kusion.md b/docs/user_docs/concepts/kusion.md deleted file mode 100644 index cbe235d6..00000000 --- a/docs/user_docs/concepts/kusion.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Kusion - -Kusion engine is to compile and deliver intents in Konfig to hybrid runtime on multi-cloud with less complexity and a consistent experience. - -![](/img/docs/user_docs/intro/kusion-engine.png) - - It consists of 3 parts: `Operation Engine`, `Runtimes` and `State`, we will describe each of these components below. - - -## Operation Engine - -Operation Engine is the entry point of the whole Kusion Engine and is responsible for Kusion basic operations like Preview, Apply, Destroy, etc. 
The main workflow of this part is to parse resources described in Konfig, figure out which resource should be modified according to the specified operation type, and execute this operation on the real infra resources. During this workflow, Runtimes and State will be involved.
-
-## Runtimes
-
-Runtime is an interface between the actual infrastructure and Kusion. All operations attempting to manipulate an infra resource should be delegated to one Runtime to make this operation affect the actual infrastructure. On the other hand, any runtime that implements this interface can be manipulated by Kusion.
-
-## State
-State is a record of an operation's result. It is a mapping between resources in Konfig and the actual infra resource. State is often used as a data source for 3-way merge/diff in operations like Apply or Preview.
-
-State can be stored in many storage mediums like filesystems, OSS, HTTP servers, etc.
-
-## How Kusion works
-Let's take operation `Preview` as an example to demonstrate how the three parts cooperate in an actual operation.
-
- 1. `Operation Engine` parses resources in Konfig and converts them into a DAG
- 2. Walk this DAG:
-    1. Get the latest `State` from the actual infra by the `Runtime`
-    2. Get the last operation `State` from the `State` storage medium
-    3. Merge/Diff three states: desired state described in Konfig, live state from `Runtime` and prior state from `State` storage medium, and return the diff result to the console
\ No newline at end of file
diff --git a/docs/user_docs/getting-started/_category_.json b/docs/user_docs/getting-started/_category_.json
deleted file mode 100644
index 1bf165d4..00000000
--- a/docs/user_docs/getting-started/_category_.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "label": "Get Started",
-  "position": 1
-}
diff --git a/docs/user_docs/getting-started/_konfig.md b/docs/user_docs/getting-started/_konfig.md
deleted file mode 100644
index 5209a51d..00000000
--- a/docs/user_docs/getting-started/_konfig.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-sidebar_position: 9
----
-
-# Kusion Model Library
-
-The **Kusion Model Library**, also known as the `Kusion Model`, is the set of configuration models built into KusionStack and described in KCL. It provides users with out-of-the-box, highly abstract configuration interfaces. The original motivation of the model library was simply to improve the efficiency and experience of YAML users: by abstracting and encapsulating the more complex models into unified models, we hope to simplify the configuration code written on the user side.
-
-Konfig repository: https://github.com/KusionStack/konfig
-
-![](/img/docs/user_docs/getting-started/konfig-arch-01.png)
-
-## 1. Directory Structure
-
-First, clone the Kusion model library: `git clone git@github.com:KusionStack/Konfig.git`.
-
-The overall structure of the Konfig configuration library is as follows:
-
-```bash
-.
-├── Makefile             # use Makefile to encapsulate common commands
-├── README.md            # configuration library instructions
-├── appops               # application operation directory, holding the KCL operation configurations of all applications
-│   ├── guestbook-frontend
-│   ├── http-echo
-│   └── nginx-example
-├── base                 # Kusion Model repository
-│   ├── examples         # Kusion Model example code
-│   │   ├── monitoring   # monitoring configuration example
-│   │   ├── native       # Kubernetes resource configuration example
-│   │   └── server       # cloud-native application operation configuration model example
-│   └── pkg
-│       ├── kusion_kubernetes   # Kubernetes low-level model library
-│       ├── kusion_models       # core model library
-│       └── kusion_prometheus   # Prometheus low-level model library
-├── hack                 # scripts
-└── kcl.mod              # library configuration file, usually used to mark the library root and declare its dependencies
-```
-
-## 2. Test the Konfig Code
-
-After installing the Kusion tools, run `make check-all` in the Konfig root directory to test all applications in the library (see [Konfig](/docs/user_docs/concepts/konfig)), or run `make check-http-echo` to test the `appops/http-echo` application.
-
-To test only the dev stack of the `appops/http-echo` application, enter the `appops/http-echo/dev` directory and run the `kusion compile` command (or the lower-level `kcl -Y kcl.yaml ./ci-test/settings.yaml` command); the output is written to the `appops/http-echo/dev/ci-test/stdout.golden.yaml` file.
-
-## 3. Add an Application
-
-In [Getting Started/Use Cases](/docs/user_docs/getting-started/usecase) we have already shown how to quickly add an application (see [Konfig](/docs/user_docs/concepts/konfig)).
-
-## 4. Konfig Architecture
-
-All IaC configuration code is managed in one large repository because, when different code packages are developed by different owners, problems of package management and version management arise, which would force the platform side to support capabilities similar to a build platform. In the monorepo mode, the business configuration code and the base configuration code live in one large repository, so version and dependency management between pieces of code is simple, and handling on the platform side is also simple: it only needs to locate directories and files in a single code base. The code is shared and managed in a unified way, which makes it easy to find, modify and maintain (the monorepo mode is also the practice adopted internally by leading Internet companies such as Google).
-
-The following is the architecture diagram of Konfig:
-
-![](/img/docs/user_docs/getting-started/konfig-arch-01.png)
-
-Inside the core models, two layers of abstraction, frontend models and backend models, simplify the configuration code for frontend users, while the low-level models are automatically generated by the [KCL OpenAPI](/docs/reference/cli/openapi) tool.
-
-For more detailed documentation of the models, see [Reference Manual/Kusion Model Library](/docs/reference/model).
diff --git a/docs/user_docs/getting-started/getting-started.md b/docs/user_docs/getting-started/getting-started.md
deleted file mode 100644
index 1a60bd23..00000000
--- a/docs/user_docs/getting-started/getting-started.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Get Started
-
-Getting started includes a quick overview of the KCL configuration language, the Kusion model library, and how to deploy the CodeCity example with KusionStack.
-
-
diff --git a/docs/user_docs/getting-started/install/_category_.json b/docs/user_docs/getting-started/install/_category_.json
deleted file mode 100644
index beb4e74f..00000000
--- a/docs/user_docs/getting-started/install/_category_.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "label": "Download & Install",
-  "position": 1
-}
diff --git a/docs/user_docs/getting-started/install/docker.md b/docs/user_docs/getting-started/install/docker.md
deleted file mode 100644
index 295371ca..00000000
--- a/docs/user_docs/getting-started/install/docker.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-sidebar_position: 2
----
-
-# Docker
-
-If your environment is not supported, you can choose the Docker version of KusionStack. First install the [Docker](https://www.docker.com/) environment and start the Docker service. Then use the `docker info` command to verify that the Docker service has started successfully.
-
-KusionStack image: https://hub.docker.com/r/kusionstack/kusion
-
-## 1. Latest Version
-
-Pull the latest version with the following command:
-
-```shell
-$ docker pull kusionstack/kusion
-Using default tag: latest
-latest: Pulling from kusion/kusion
-...
-kusionstack/kusion:latest
-$
-```
-
-Then use the following command to check the KCL version:
-
-```shell
-$ docker run --rm -it kusionstack/kusion kcl --version
-kclvm version is 0.4.1; checksum: ***
-$
-```
-
-## 2. Custom Version
-
-First check the list of image [versions](https://hub.docker.com/r/kusionstack/kusion/tags), then pull the Kusion image with the following command (the Kusion image includes the KCL command tools):
-
-```shell
-$ docker pull kusionstack/kusion
-...
-```
-
-Then use the following command to check the KCL version:
-
-```shell
-$ docker run --rm -it kusionstack/kusion:v0.4.1 kcl --version
-kclvm version is 0.4.1
-$
-```
-
-## 3. Run KCL
-
-If you want to verify the execution of the KCL program, you can first create a `hello.k` file with the following content:
-
-```python
-hello = "world"
-```
-
-Then execute the `hello.k` file with the following command:
-
-```shell
-$ docker run --rm -it -v `pwd`:/root/hello.k kusionstack/kusion kcl /root/hello.k
-hello: world
-$
-```
-
-The output is YAML data with the content `hello: world`.
diff --git a/docs/user_docs/getting-started/install/index.md b/docs/user_docs/getting-started/install/index.md
deleted file mode 100644
index 7121adab..00000000
--- a/docs/user_docs/getting-started/install/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Download & Install
-
-KusionStack is installed and managed by the kusionup tool, or it can be used in Docker.
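At a glance, the two installation paths look roughly like the sketch below. Both commands are taken from the kusionup and Docker sections that follow; the `kusion version` check is just one convenient way to confirm the install worked.

```bash
# Option 1: kusionup (recommended) - installs kusionup plus the latest kusion toolchain.
brew install KusionStack/tap/kusionup && kusionup init --skip-prompt && source $HOME/.kusionup/env
kusion version

# Option 2: Docker - run the toolchain from the official image without a local install.
docker pull kusionstack/kusion
docker run --rm -it kusionstack/kusion kcl --version
```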
diff --git a/docs/user_docs/getting-started/install/kusionup.md b/docs/user_docs/getting-started/install/kusionup.md deleted file mode 100644 index ab94e971..00000000 --- a/docs/user_docs/getting-started/install/kusionup.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Install by Kusionup - -It is recommended to install `kusion` through the `kusionup` tool, which is an elegant tool for managing multiple versions of `kusion`, you can use it to: - -- Install any version of `kusion` with one click -- Flexibly switch between different versions of `kusion` -- Customize your local `kusion` version - -## 1. Install Kusionup and Latest Kusion - -The following script will install the `kusionup` tool and the latest version of `kusion` tools with one-click: - -```bash -brew install KusionStack/tap/kusionup && kusionup init --skip-prompt && source $HOME/.kusionup/env -``` - -And you can later upgrade `kusionup` with `brew`: - -```bash -brew update -brew upgrade KusionStack/tap/kusionup -``` - -The installation script creates a `$HOME/.kusionup` directory and contains these files: - -- `$HOME/.kusionup/bin` is a directory that contains the `kusionup` binary file -- `$HOME/.kusionup/env` is a file that declares environment variables used by `kusionup` and `kusion` tools -- `$HOME/.kusionup/current` is a soft link to the currently active `kusion` tools -- `$HOME/.kusionup/$VERSION` are directories of different versions of `kusion` tools. For example, the latest version will be installed by default to the `$HOME/.kusionup/latest` directory - -**💡 Install the custom version**: - -The above installation script by default installs the latest `kusion` tools. And if you want to customize it and install a specific version(github@v0.7.0 as an example) directly, please try the following command: - -```bash -brew install KusionStack/tap/kusionup && kusionup init --skip-install && source $HOME/.kusionup/env && kusionup reinstall github@v0.7.0 -``` - -**💡 Installation failure troubleshooting: - - -**❓ Issue 1**:M1 Mac openssl `dylib` library cannot be found or SSL module is not available - -1. Make sure you have arm64e-version homebrew installed at `/opt/homebrew`. If not, you can first install it: - -```bash -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -# add to path environment -export PATH=/opt/homebrew/bin:$PATH -``` - -2. 
Install `openssl@1.1` with `brew` - -```bash -brew install openssl@1.1 -``` - -**❓ Issue 2**: mac KCLVM `gettext` dylib cannot be found - -![image.png](https://intranetproxy.alipay.com/skylark/lark/0/2022/png/317257/1646538731635-b1e290a5-465d-4838-b8d1-7f22cb48e267.png#clientId=uc50abf48-5ee8-4&crop=0&crop=0&crop=1&crop=1&from=paste&height=200&id=ub5ce78d1&margin=%5Bobject%20Object%5D&name=image.png&originHeight=400&originWidth=1158&originalType=binary&ratio=1&rotation=0&showTitle=false&size=238920&status=done&style=none&taskId=ue75303e6-140d-450f-84de-464da45a473&title=&width=579) - -- Use the `which` command to find the location of your own `gettext` (assuming `/Users/UserName/tools/homebrew/bin/gettext`) - -```python -which gettext -``` - -- Use the `otool -L` command to get the location of `libintl.8.dylib` - -```python -C02Y90Q4JHD2:bin yueyi$ otool -L /Users/yueyi/tools/homebrew/bin/gettext -/Users/yueyi/tools/homebrew/bin/gettext: - /System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation (compatibility version 150.0.0, current version 1675.129.0) - /Users/yueyi/tools/homebrew/Cellar/gettext/0.21/lib/libintl.8.dylib (compatibility version 11.0.0, current version 11.0.0) - /usr/lib/libiconv.2.dylib (compatibility version 7.0.0, current version 7.0.0) - /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1281.100.1) -``` - -- Copy `/Users/yueyi/tools/homebrew/Cellar/gettext/0.21/lib/libintl.8.dylib` to `/usr/local/opt/gettext/lib/libintl.8.dylib` - -## 2. Manage Multiple Versions of Kusion - -During the `kusionup` installation the default version of `kusion` is installed. Then you can check and navigate through all the installed versions of `kusion`: - -```bash -$ kusionup -Use the arrow keys to navigate: ↓ ↑ → ← -? Select a version: - ▸ latest -``` - -Also, you can list all the available versions of `kusion`: - -```bash -$ kusionup ls-ver -cdn@latest -cdn@v0.4.2 -cdn@v0.4.1 -github@latest -github@v0.4.2 -github@v0.4.1 -``` - -To install a specific version of ``kusion`, you can use `kusionup install $VERSION`: - -```shell -# choose the cdn to speed up installation -$ kusionup install cdn@latest -Downloaded 0.0% ( 2426 / 139988826 bytes) ... -Downloaded 11.4% ( 16003466 / 139988826 bytes) ... -Downloaded 21.0% ( 29433014 / 139988826 bytes) ... -Downloaded 32.2% ( 45077686 / 139988826 bytes) ... -Downloaded 41.9% ( 58642898 / 139988826 bytes) ... -Downloaded 51.2% ( 71647010 / 139988826 bytes) ... -Downloaded 61.6% ( 86258486 / 139988826 bytes) ... -Downloaded 71.2% ( 99667706 / 139988826 bytes) ... -Downloaded 81.5% (114078806 / 139988826 bytes) ... -Downloaded 91.5% (128134166 / 139988826 bytes) ... -Downloaded 100.0% (139988826 / 139988826 bytes) -INFO[0055] Unpacking /root/.kusionup/kusion@latest/kusion-linux.tgz ... -INFO[0061] Success: latest downloaded in /root/.kusionup/kusion@latest -INFO[0061] Default Kusion is set to 'latest' - -$ kusion version -releaseVersion: v0.4.1 -...... -``` - -Use `kusionup show` command to take a view of all the installed versions and spot the active version: - -```bash -$ kusionup show -| VERSION | ACTIVE | -|---------------|--------| -| cdn@latest | * | -| cdn@v0.4.1 | | -``` - -You can also `kusionup remove $VERSION` to uninstall a specific version of `kusion`: - -```bash -# try remove the latest version -$ kusionup remove latest -INFO[0000] Removing latest - -$ kusionup -Use the arrow keys to navigate: ↓ ↑ → ← -? Select a version: - ▸ cdn@v0.4.1 # there is no latest version -``` - -## 3. 
Kusionup Command Usage Reference - -```bash -$ kusionup -h -The Kusion installer - -Usage: - kusionup [flags] - kusionup [command] - -Available Commands: - default Set the default Kusion version - help Help about any command - install Install Kusion with a version - ls-ver List Kusion versions to install - remove Remove Kusion with a version - show Show installed Kusion - version Show kusionup version - -Flags: - -h, --help help for kusionup - -v, --verbose Verbose - -Use "kusionup [command] --help" for more information about a command. -``` - -## 4. Kusionup Tips - -**How to add a custom version of `kusion` to the `kusionup` toggle list?** - -You might need to add a local version of `kusion` for debugging and this can be done by following commands: - -```bash -# place your debug version of kusion tools to the kusion-debug directory -$ cp -r $HOME/.kusionup/kusion-debug - -# switch to the debug version -$ kusionup -Use the arrow keys to navigate: ↓ ↑ → ← -? Select a version: - cdn@latest - ▸ debug -``` - -**Note:** the subdirectories under the `.kusionup` must be named with a "kusion-" prefix and with a version number as a suffix - -## 5. Install the VS Code Extension - -To improve the KCL development on VS Code, there are VS Code - extensions for both VS Code Web IDE and VS Code. - -The [VS Code Web IDE](https://vscode.dev) can be reached through the browser, and you can search and install the [KCL for vscode.dev](https://marketplace.visualstudio.com/items?itemName=kcl.kcl-vscode-web-extension) in the VS Code Extension tab. And here's the syntax highlighting view you'll get: - -![](/img/docs/user_docs/getting-started/install/ide-vscode.png) - -The KCL extension for the local VS Code IDE provides more rich language support for the KCL language such as highlighting, auto-completion, quick info hover and code navigation, etc. Although the extension is not a must-required part of Kusion, it is recommended to install it to improve coding efficiency. - diff --git a/docs/user_docs/getting-started/kcl.md b/docs/user_docs/getting-started/kcl.md deleted file mode 100644 index 437cb672..00000000 --- a/docs/user_docs/getting-started/kcl.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -sidebar_position: 3 ---- - -# KCL Quick Start - -KCL (Kusion Configuration Language) is Kusion's built-in cloud-native domain configuration and policy language. At the beginning of its design, KCL was inspired by Python3, and at the same time absorbed the conceptual design of declarative and OOP programming paradigms. In this section we will quickly demonstrate the basic features of the KCL language. - -## 1. Hello KCL - -The best way to learn a new language is to write a few small programs, and the same goes for configuring languages. We can write KCL programs just like writing configuration. - -Here is a simple `hello.k`: - -```python -hello = "KCL" -``` - -Set the `hello` attribute to the `"KCL"` string. Then save the code to the `hello.k` file. - -How to execute this program depends on the specific development environment, we first assume that the local macOS or Linux system has installed the `kcl` command (or enter the **Docker** environment test by `docker run --rm -it kusionstack/kusion bash`) and then run the following command: - -```shell -$ kcl hello.k -hello: KCL -``` - -The effect of command line execution is shown as follows: - -![](/img/docs/user_docs/getting-started/hello.gif) - -The output is configuration data in YAML format. 
Although this program is simple, we can verify the basic usage of the development environment and the `kcl` command line by executing the KCL configuration program to the output. - -## 2. A little more complicated configuration - -In addition to the common key-value pairs, common configuration data also has nested dictionary and list types, and the value basic type includes boolean and numeric types in addition to strings. Here's a slightly more complex `server.k` configuration: - -```python -# This is a KCL document - -title = "KCL Example" - -owner = { - name = "The KCL Authors" - data = "2020-01-02T03:04:05" -} - -database = { - enabled = True - ports = [8000, 8001, 8002] - data = [["delta", "phi"], [3.14]] - temp_targets = {cpu = 79.5, case = 72.0} -} - -servers = [ - {ip = "10.0.0.1", role = "frontend"} - {ip = "10.0.0.2", role = "backend"} -] -``` - -where `#` begins with a line comment. The value of `owner` is a dictionary. The value of the dictionary contains the content in the form of `{}`. The key-value inside the dictionary is similar to the `hello = "KCL"` example. `database` is another dictionary in which the value of the dictionary attribute appears boolean `True`, list `[]` and dictionary `{}`, in which the value of the numeric type also appears in the list and dictionary. The `servers` attribute is a list with dictionaries nested inside the list (dictionaries and lists, as well as the `schema` that will be discussed later, can be nested within each other). - -The YAML output of this configuration is as follows: - -```yaml -$ kcl server.k -title: KCL Example -owner: - name: The KCL Authors - data: '2020-01-02T03:04:05' -database: - enabled: true - ports: - - 8000 - - 8001 - - 8002 - data: - - - delta - - phi - - - 3.14 - temp_targets: - cpu: 79.5 - case: 72.0 -servers: -- ip: 10.0.0.1 - role: frontend -- ip: 10.0.0.2 - role: backend -``` - -## 3. Define the structure of the configuration using KCL schema - -The KCL provides abstract support for attributes with a fixed attribute structure and default value behavior through the `schema` syntax. - -For example, the configuration of `database` in the above example is generally the default value. We can define a structure for the default configuration of the database: - -```python -schema DatabaseConfig: - enabled: bool = True - ports: [int] = [8000, 8001, 8002] - data: [[str|float]] = [["delta", "phi"], [3.14]] - temp_targets: {str: float} = {cpu = 79.5, case = 72.0} -``` - -`enabled` is a boolean type; `ports` is an integer list type; `data` is a list of lists, and the inner list elements are strings or floats; `temp_targets` is a dictionary type, and the attribute value of the dictionary is floating point type. And each attribute of `DatabaseConfig` defines a default value. - -Then pass `database = DatabaseConfig {}` to generate a structure with the same attributes as the default values. We can also modify the default value: - -```python -database = DatabaseConfig { - ports = [2020, 2021] -} -``` - -`schema DatabaseConfig` not only provides default values for attributes, but also adds type information to attributes. 
Therefore, if we accidentally writes the wrong attribute value type, KCL will give a friendly error prompt, such as the following example where `ports` is wrongly written as a floating point type: - -```python -database = DatabaseConfig { - ports = [1.2, 1.3] -} -``` - -When executed, an error similar to the following will be generated (the displayed file path depends on the local environment): - -```shell -$ kcl server.k -KCL Compile Error[E2G22] : The type got is inconsistent with the type expected - ---> File /path/to/server.k:8:2 - 8 | ports = [1.2, 1.3] - 5 ^ -> got [float(1.2)|float(1.3)] - ---> File /path/to/server.k:3:2 - 3 | ports: [int] = [8000, 8001, 8002] - 5 ~ -> expect [int] -expect [int], got [float(1.2)|float(1.3)] -``` - -Similarly we can encapsulate the attributes of the `servers` section with the following code: - -```python -schema ServerConfig: - ip: str - role: "frontend" | "backend" - -servers = [ - ServerConfig {ip = "10.0.0.1", role = "frontend"} - ServerConfig {ip = "10.0.0.2", role = "backend"} -] -``` - -The attribute `ip` of `ServerConfig` is a string type, and no default value is given. We must manually add the value of the `ip` attribute when generating the `ServerConfig` type attribute, otherwise the KCL will report a missing required attribute error. The `role` attribute is a `"frontend" | "backend"` enumerated string type. - -In addition, `schema` can also combine `check`, `mixin`, optional attributes, inheritance and extension modules to achieve more complex configuration and policy data abstraction, details can be found at [here](../../reference/lang/lang/tour.md). diff --git a/docs/user_docs/getting-started/usecase.md b/docs/user_docs/getting-started/usecase.md deleted file mode 100644 index b6e18d0f..00000000 --- a/docs/user_docs/getting-started/usecase.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Use Cases -This tutorial will demonstrate how to deliver an App with a Loadbalancer in one Kusion command. - -## Prerequisites - -- [Kusion](/docs/user_docs/getting-started/install) -- [Kubernetes](https://kubernetes.io/) or [Kind](https://kind.sigs.k8s.io/) - -## Init Project - -Firstly, let's clone the Konfig repo and enter the root directory: - -```shell -git clone git@github.com:KusionStack/konfig.git && cd konfig -``` - -After this step, we can init this tutorial project with online templates -```shell -kusion init --online -``` - -All init templates are listed as follows: - -```shell -➜ konfig git:(main) ✗ kusion init --online -? Please choose a template: [Use arrows to move, type to filter] -> code-city Code City metaphor for visualizing Go source code in 3D. - deployment-multi-stack A minimal kusion project of multi stacks - deployment-single-stack A minimal kusion project of single stack -``` - -Select `code-city` and press `Enter`. After that, we will see hints below and use the default value to config this project and stack. - -![](/img/docs/user_docs/getting-started/choose-template.gif) - - -After this process, we can get the whole file hierarchy with this command -```shell -cd code-city && tree -``` - -```shell -➜ konfig git:(main) ✗ cd code-city && tree -. -├── base -│   └── base.k -├── dev -│   ├── ci-test -│   │   └── settings.yaml -│   ├── kcl.yaml -│   ├── main.k -│   └── stack.yaml -└── project.yaml - -3 directories, 6 files -``` - More details about the directory structure can be found in -[Konfig](/docs/user_docs/concepts/konfig). 
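As a quick orientation before reviewing the config files, you can also peek at the two metadata files that mark the Project and Stack boundaries. The exact fields are defined by the template; the `name` values shown as comments below are only an illustrative sketch of what a freshly initialized `code-city` project might contain.

```bash
# Inspect the Project and Stack metadata (the commented output is illustrative).
cat project.yaml
# name: code-city        # Project metadata, e.g. the project name

cat dev/stack.yaml
# name: dev              # Stack metadata, e.g. the stack name
```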
-
-### Review Config Files
-
-```python
-# main.k
-import base.pkg.kusion_models.kube.frontend
-
-# The application configuration in stack will overwrite
-# the configuration with the same attribute in base.
-appConfiguration: frontend.Server {
-    image = "howieyuen/gocity:latest"
-}
-```
-`main.k` contains only four lines of code (excluding comments). Line 1 imports a pkg that contains the model `Server`, which is an abstract model representing the App we will deliver later. This model hides the complexity of Kubernetes `Deployment` and `Service`, and only one field, `image`, is needed to make this App ready to use.
-
-More details about Konfig Models can be found in [Konfig](https://github.com/KusionStack/konfig).
-
-## Delivery
-```shell
-cd dev && kusion apply --watch
-```
-Go to the `dev` folder, and we will deliver this App into a Kubernetes cluster with the single command `kusion apply --watch`.
-
-![](/img/docs/user_docs/getting-started/apply.gif)
-
-Check the `Deployment` status:
-```shell
-kubectl -ncode-city get deploy
-```
-The expected output is shown as follows:
-
-```shell
-➜  dev git:(main) ✗ kubectl -ncode-city get deploy
-NAME           READY   UP-TO-DATE   AVAILABLE   AGE
-code-citydev   1/1     1            1           1m
-```
-
-Port-forward our App with the `service`:
-```shell
-kubectl port-forward -ncode-city svc/gocity 4000:4000
-```
-```shell
-➜  dev git:(main) ✗ kubectl port-forward -ncode-city svc/gocity 4000:4000
-Forwarding from 127.0.0.1:4000 -> 4000
-Forwarding from [::1]:4000 -> 4000
-```
-
-Visit [http://localhost:4000/#/github.com/KusionStack/kusion](http://localhost:4000/#/github.com/KusionStack/kusion) in your browser and enjoy.
-
-![](/img/docs/user_docs/getting-started/gocity.png)
diff --git a/docs/user_docs/guides/adopting/from_kubernetes.md b/docs/user_docs/guides/adopting/from_kubernetes.md
deleted file mode 100644
index b1bb89bb..00000000
--- a/docs/user_docs/guides/adopting/from_kubernetes.md
+++ /dev/null
@@ -1,106 +0,0 @@
-# From Kubernetes
-
-## 1. Kubernetes OpenAPI Spec
-
-Starting from Kubernetes 1.4, the alpha support for the OpenAPI specification (known as Swagger 2.0 before it was donated to the OpenAPI Initiative) was introduced, and the API descriptions follow the [OpenAPI Spec 2.0](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/2.0.md). And since Kubernetes 1.5, Kubernetes supports [directly extracting models from source code and then generating the OpenAPI spec file](https://github.com/kubernetes/kube-openapi) to automatically keep the specifications and documents up to date with the operation and models.
-
-In addition, Kubernetes CRD uses [OpenAPI v3.0 validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation) to describe a custom schema (in addition to the built-in attributes apiVersion, Kind, and metadata), which the APIServer uses to validate the CR during the resource creation and update phases.
-
-## 2. KCL OpenAPI Support
-
-The `kcl-openapi` tool supports extracting and generating KCL schemas from Kubernetes OpenAPI/CRD. The [KCL OpenAPI Spec](/docs/reference/cli/openapi/spec) defines the mapping between the OpenAPI specification and the KCL language features.
-
-The `kcl-openapi` tool will be installed by default when installing the [Kusion tools pack](/docs/user_docs/getting-started/install). For a quick start with the tool, see the [KCL OpenAPI tool](/docs/reference/cli/openapi).
-
-## 3. Migrate From Kubernetes To Kusion
-
-The entire OpenAPI definition of the Kubernetes built-in model is stored in the [Kubernetes OpenAPI-Spec File](https://github.com/kubernetes/kubernetes/blob/master/api/openapi-spec/swagger.json). Taking this file as input, the KCL OpenAPI tool can generate all model schemas of the corresponding version. In the following sections, we will introduce how to migrate from Kubernetes to Kusion with a deployment release scenario as an example. Assume that your project is using [Kubernetes Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) to define Deployment configuration. Migrating to Kusion requires only the following steps:
-
-### 3.1 Write Config Based On The Kusion Models
-
-We provide an out-of-the-box `kusion_models` package for you to quickly start. It contains a well-designed frontend model called [`Server schema`](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/server.k). You can declare your configurations by initializing the `Server schema`. For the description and usage of the schema and its attributes, please refer to the [Server schema documentation](https://kusionstack.io/docs/reference/model/kusion_models/kube/frontend/doc_server).
-
-And as you may have some inventory of Kubernetes configuration data, Kusion plans to provide a `kube2kcl` converting tool to translate it into KCL configuration instances of the `Server schema`. This tool is under development.
-
-Once you have configured the model and migrated the data, you can continue your Kusion tour by maintaining and deploying the configurations, and you can find guidelines in [Kubernetes - Use Kusion for Application Deployment and maintenance](/docs/user_docs/guides/working-with-k8s/).
-
-### 3.2 Build Your Custom Frontend Models
-
-If the existing Kusion Models do not meet your specific business requirements, you can also design your custom frontend model package. In Konfig's `kusion_kubernetes` directory, there is a copy of the generated Kubernetes 1.22 models, and you can design your custom models based on it. You can also develop custom scripts to migrate your configuration data, as the `kube2kcl` tool does.
-
-#### 3.2.1 Convert Kubernetes Deployment Into KCL Schema
-
-We already have a copy of the [generated Kubernetes 1.22 models](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/deployment.k) under the `base/pkg/kusion_kubernetes` directory in the Konfig repository. You can skip this step and use the existing models, or you can generate other versions if needed.
-
-Now let's generate a v1.23 version of the Kubernetes models. From the [Kubernetes v1.23 OpenAPI Spec](https://github.com/kubernetes/kubernetes/blob/release-1.23/api/openapi-spec/swagger.json), we can find the definition of the `apps/v1.Deployment` model, and here is a partial excerpt:
-
-```json
-{
-    "definitions": {
-        "io.k8s.api.apps.v1.Deployment": {
-            "description": "Deployment enables declarative updates for Pods and ReplicaSets.",
-            "properties": {
-                "apiVersion": {
-                    "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
-                    "type": "string"
-                },
-                "kind": {
-                    "description": "Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - }, - "spec": { - "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentSpec", - "description": "Specification of the desired behavior of the Deployment." - }, - "status": { - "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentStatus", - "description": "Most recently observed status of the Deployment." - } - }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "apps", - "kind": "Deployment", - "version": "v1" - } - ] - } - }, - "info": { - "title": "Kubernetes", - "version": "unversioned" - }, - "paths": {}, - "swagger": "2.0" -} -``` - -You can save the above spec as `deployment.json` and run `kcl-openapi generate model -f deployment.json`, and the KCL Schemas will be generated and output to your current workspace. Other Kubernetes models can also be saved in that spec file and can be generated similarly. - -#### 3.2.2 Design Custom Frontend Models - -Since the Kubernetes built-in models are atomistic and kind of complex to beginners, we recommend taking the native model of Kubernetes as the backend output model and designing a batch of frontend models which could become a more abstract, friendlier and simpler interface to the user. You can refer to the design pattern in the [`Server Schema in the Konfig repo`](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/server.k). - -#### 3.2.3 Migrate The Configuration Data - - -You can develop your custom scripts to migrate your configuration data automatically. Kusion will later provide writing scaffolding and writing guidelines for this script. - -## 4. Migrate From Kubernetes CRD - -If you developed CRDs, you can generate the KCL version of the CRD schemas and declare CRs based on that. - -* Generate KCL Schema from CRD - - ``` - kcl-openapi generate model --crd --skip-validation -f - ``` - -* Define CR based on CRDs in KCL - - You can initialize the CRD schema to define a CR, or further, you can use the generated schema as a backend model and design a frontend interface for users to initialize. The practice is similar to what `Kusion Models` does on Kubernetes built-in models. diff --git a/docs/user_docs/guides/adopting/index.md b/docs/user_docs/guides/adopting/index.md deleted file mode 100644 index 94f85dcf..00000000 --- a/docs/user_docs/guides/adopting/index.md +++ /dev/null @@ -1,8 +0,0 @@ -# Adopting KusionStack - -If you start using kusion in a brand new project, what you only need to do is to write and manage your infrastructure configuration from scratch, and we provide user guide documents for infra running on different runtimes to guide you through this process. - -However, for infrastructure which has already been managed by Kubernetes, you may have some inventory of configuration models and data. In this case, kusion provides automated tools to help you migrate from Kubernetes quickly: - -- For Kubernetes users, Kusion provides a `kcl-openapi` tool to translate Kubernetes OpenAPI to KCL model code, so that the existing Kubernetes core models can be directly included in the KCL models scope. 
-- For Kubernetes CRD users such as istio, the `kcl-openapi` tool can also convert CRDs into KCL model code. diff --git a/docs/user_docs/guides/argocd/_category_.json b/docs/user_docs/guides/argocd/_category_.json deleted file mode 100644 index 17630407..00000000 --- a/docs/user_docs/guides/argocd/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "ArgoCD", - "position": 7 -} diff --git a/docs/user_docs/guides/argocd/argocd.md b/docs/user_docs/guides/argocd/argocd.md deleted file mode 100644 index 5d83e218..00000000 --- a/docs/user_docs/guides/argocd/argocd.md +++ /dev/null @@ -1 +0,0 @@ -# ArgoCD diff --git a/docs/user_docs/guides/argocd/drift-detection-by-argocd.md b/docs/user_docs/guides/argocd/drift-detection-by-argocd.md deleted file mode 100644 index de89f849..00000000 --- a/docs/user_docs/guides/argocd/drift-detection-by-argocd.md +++ /dev/null @@ -1,148 +0,0 @@ -# Drift Detection by ArgoCD - -## Prerequisite - -Install ArgoCD: - -```bash -kubectl create namespace argocd -kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml -``` - -## Config ArgoCD Plugin with Kusion - -ArgoCD has already had some common built-in plugins, including helm, jsonnet, and kustomize. -For KCL, as a brand-new configuration language, if you want to use ArgoCD to complete drift detection, -you need to follow its plugin mechanism and configure Kusion as a third-party plugin. -The specific operations are as follows: - -1. Download [patch](https://github.com/KusionStack/examples/blob/main/kusion/argo-cd/patch-argocd-cm.yaml) file: - -```shell -wget -q https://raw.githubusercontent.com/KusionStack/examples/main/kusion/argo-cd/patch-argocd-cm.yaml -``` - -2. Update configuration - -```shell -kubectl -n argocd patch cm/argocd-cm -p "$(cat patch-argocd-cm.yaml)" -``` - -## Update ArgoCD Deployment - -After completing the first step, ArgoCD will recognize the Kusion plugin, -but the Kusion plugin has not been loaded into the ArgoCD image. -To implement configuration drift detection, we have to tune the Deployment of argocd-repo-server. - -1. Download [patch](https://github.com/KusionStack/examples/blob/main/kusion/argo-cd/patch-argocd-repo-server.yaml) file - -```shell -wget -q https://raw.githubusercontent.com/KusionStack/examples/main/kusion/argo-cd/patch-argocd-repo-server.yaml -``` - -2. Update configuration - -```shell -kubectl -n argocd patch deploy/argocd-repo-server -p "$(cat patch-argocd-repo-server.yaml)" -``` - -3. Update complete - -```shell -kubectl get pod -n argocd -l app.kubernetes.io/name=argocd-repo-server -``` - -## Create KCL Project - -At this point, the preparation work has been completed, and now the verification process is started. -Here we use example projects from the open-source [Konfig](https://github.com/KusionStack/konfig) library. - -1. Enable local port forwarding - -```shell -kubectl port-forward svc/argocd-server -n argocd 8080:443 -``` - -2. Login to ArgoCD - -```shell -argocd login localhost:8080 -``` - -3. Create ArgoCD Application - -```shell -argocd app create guestbook-test \ ---repo https://github.com/KusionStack/konfig.git \ ---path appops/guestbook-frontend/prod \ ---dest-namespace default \ ---dest-server https://kubernetes.default.svc \ ---config-management-plugin kusion -``` - -:::info - -If you are using a private repository, you need to configure the private repository access with private key credentials before executing the create command. 
- -Please refer [Private Repositories](https://argo-cd.readthedocs.io/en/stable/user-guide/private-repositories/#ssh-private-key-credential) for more details. -::: - -After successfully creating, you can see the following output: - -``` -application 'guestbook-test' created -``` - -Through the ArgoCD UI, you can see that the created applications have not been synchronized yet. -Here, you can manually synchronize or set automatic synchronization. - -![](/img/docs/user_docs/guides/argocd/out-of-sync.jpg) - -4. Set synchronization policy (only `unsynced` resources): - -```shell -argocd app set guestbook-test --sync-option ApplyOutOfSyncOnly=true -``` - -:::info - -For more information on synchronization strategies, see [Sync Options](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/) -::: - -Sync succeeded: - -![](/img/docs/user_docs/guides/argocd/synced.jpg) - -## Configure Drift Detection - -At this point, the ArgoCD monitoring KCL project has been completed, implement configuration drift detection and achieve result consistency. -Let's modify the mirror version of `guestbook-test` to implement configuration changes. - -1. Update image - -```diff - appConfiguration: frontend.Server { -- image = "gcr.io/google-samples/gb-frontend:v4" -+ image = "gcr.io/google-samples/gb-frontend:v5" - schedulingStrategy.resource = res_tpl.tiny - } -``` - -2. Compile Again - -```shell -kusion compile -w appops/guestbook-frontend/prod -``` - -3. Git commit and push - -```shell -git add . -git commit -m "mannual drifted config for appops/guestbook-frontend/prod" -git push origin main -``` - -4. Drift configuration auto-convergence - -![](/img/docs/user_docs/guides/argocd/reconcile-drifted-config.jpg) - diff --git a/docs/user_docs/guides/organizing-projects-stacks/_category_.json b/docs/user_docs/guides/organizing-projects-stacks/_category_.json deleted file mode 100644 index 4c2795ce..00000000 --- a/docs/user_docs/guides/organizing-projects-stacks/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Project Best Practices", - "position": 5 -} diff --git a/docs/user_docs/guides/organizing-projects-stacks/mapping.md b/docs/user_docs/guides/organizing-projects-stacks/mapping.md deleted file mode 100644 index d6cad498..00000000 --- a/docs/user_docs/guides/organizing-projects-stacks/mapping.md +++ /dev/null @@ -1,44 +0,0 @@ -# Mapping - -In [Project & stack](/docs/user_docs/concepts/konfig#project--stack) section, we have already introduced the basics of `Project` and `Stack`. -A configuration library consists of Project, and the further logical isolation under Project is Stack. Each Project contains one or more Stack. -Project and Stack can choose the mapping relationship based on user needs. -For example, users can map an application to a Project, or map an operation and maintenance scenario to a Project, -such as site construction and operation and maintenance. -In this section, several best practices for mapping relationships are detailed. - -## Cloud Native: Applications and Clusters - -In the application-centric operation and maintenance system, applications are the core object of DevOps operation and maintenance. -In cloud-native scenarios, applications are usually deployed in Kubernetes clusters, -So you can map `Project` to applications and `Stack` to clusters. -An application's configuration is distinct within different clusters, the differentiated configuration is stored in each stack directory, -and the common configuration of all clusters is stored in the base directory. 
- -| Concept | Mapping TO | -| ------- | ----------- | -| Project | Application | -| Stack | Cluster | - -## Single-tenancy: Applications and Environments - -An application usually needs to be deployed to multiple environments, such as dev, test, and prod. -In a single-tenant scenario, a recommended practice is to map `Project` to applications and `Stack` to environments. - -| Concept | Mapping To | -| ------- | ----------- | -| Project | Application | -| Stack | Environment | - -## Multi-tenancy: Applications and Environments - -In an application-centric operation and maintenance system in a multi-tenant scenario, -we recommend appending tenant information to the app's name. -The application name is unique among different tenants, that is, -`Project` is mapped to the unique application, `Project Name` is the unique name of all apps, -and `Stack` is mapped to the environment configuration of the unique app. - -| Concept | Mapping To | -| ------- | ----------- | -| Project | Application+Tenant | -| Stack | Environment | diff --git a/docs/user_docs/guides/organizing-projects-stacks/organizing-projects-stacks.md b/docs/user_docs/guides/organizing-projects-stacks/organizing-projects-stacks.md deleted file mode 100644 index f96a8e13..00000000 --- a/docs/user_docs/guides/organizing-projects-stacks/organizing-projects-stacks.md +++ /dev/null @@ -1 +0,0 @@ -# Project Best Practices diff --git a/docs/user_docs/guides/prometheus/_category_.json b/docs/user_docs/guides/prometheus/_category_.json deleted file mode 100644 index baac31dc..00000000 --- a/docs/user_docs/guides/prometheus/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Prometheus", - "position": 8 -} diff --git a/docs/user_docs/guides/prometheus/recording-and-alerting.md b/docs/user_docs/guides/prometheus/recording-and-alerting.md deleted file mode 100644 index 85a67ba6..00000000 --- a/docs/user_docs/guides/prometheus/recording-and-alerting.md +++ /dev/null @@ -1,470 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Recording and Alerting - -This guide will show you how to set up an Alertmanager cluster integrating with a Prometheus instance based on Prometheus Operator, and use PromethuesRules to record metrics and push alerts. - -## Introduction - -Prometheus is an open-source system monitoring and alerting toolkit. It collects and stores its metrics as time series data, i.e. metrics information is stored with the timestamp at which it was recorded, alongside optional key-value pairs called labels. - -This diagram illustrates the architecture of Prometheus and some of its ecosystem components: - -![](/img/docs/user_docs/guides/prometheus/structure.png) - -Prometheus scrapes metrics from instrumented jobs, either directly or via an intermediary push gateway for short-lived jobs. It stores all scraped samples locally and runs rules over this data to either aggregate and record new time series from existing data or generate alerts. Grafana or other API consumers can be used to visualize the collected data. - -## Prerequisites - -To follow this guide, you need to complete the following steps: - -1、Install Kusion - -We recommend using the official installation tool _kusionup_ which supports multi-version management. -See [Download and Install](/docs/user_docs/getting-started/install) for more details. - -2、Clone Konfig repo - -In this guide, we need some KCL models that [Konfig](https://github.com/KusionStack/konfig.git) offers. -For more details on KCL language, please refer to [Tour of KCL](/docs/reference/lang/lang/tour). 
- -3、Running Kubernetes cluster - -There must be a running Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. -If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. - -4、Install Prometheus Operator - -Install Prometheus Operator is quite simple, you only need to run: - -```bash -kubectl create -f bundle.yaml -``` - -For more details, please check [Prometheus Operator Quickstart](https://github.com/prometheus-operator/prometheus-operator#quickstart). - -## Setup - -There is a project named `prometheus-install` in Konfig mono repo, which contains the full configuration of setting up Prometheus and Alertmanager: - -- an Alertmanager cluster -- an AlertmanagerConfig object -- an Alertmanager Service -- a Prometheus cluster -- required RBAC -- a Prometheus Service - - -:::info -If you can't wait to experience one-click setup, please jump to the [One-click Setup](#one-click-setup) section. -::: - -### Setup Alertmanager - -By default, the Alertmanager instances will start with a minimal configuration which isn’t useful since it doesn’t send any notification when receiving alerts. - -You have 3 options to provide the [Alertmanager configuration](https://prometheus.io/docs/alerting/configuration/): - -1. You can use a native Alertmanager configuration file stored in a Kubernetes secret. -2. You can use `spec.alertmanagerConfiguration` to reference an AlertmanagerConfig object in the same namespace which defines the main Alertmanager configuration. -3. You can define `spec.alertmanagerConfigSelector` and `spec.alertmanagerConfigNamespaceSelector` to tell the operator which AlertmanagerConfigs objects should be selected and merged with the main Alertmanager configuration. - -:::tip -Option 2 is chosen in the [`prometheus-install`](https://github.com/KusionStack/konfig/tree/main/base/examples/monitoring/prometheus-install) project. -::: - -1. The following code snippet is AlertmanagerConfig, which will send notifications to a fictitious webhook service: - -```py -_alertmanager_config: monitoringv1alpha1.AlertmanagerConfig{ - metadata = { - name = "main" - namespace = _common_namespace - labels = { - "alertmanagerConfig" = "main" - } - } - spec = { - route = { - groupBy = ["job"] - groupWait = "30s" - groupInterval = "5m" - repeatInterval = "12h" - receiver = "webhook" - } - receivers = [ - { - name = "webhook" - webhookConfigs = [ - { - url = "http://example.com/" - } - ] - } - ] - } -} -``` - -2. Setting up an Alertmanager cluster with 3 replicas, reference the AlertmanagerConfig object: - -```py -_alertmanager: monitoringv1.Alertmanager{ - metadata = { - name = "main" - namespace = "default" - } - spec = { - replicas = 3 - # using AlertmanagerConfig for global configuration - alertmanagerConfiguration = { - name = _alertmanager_config.metadata.name - } - } -} -``` - -3. Expose Alertmanager service for integrating with Prometheus instances. 
-Creating a Kubernetes Service listening target port `9093`: - -```py -_alertmanager_svc: corev1.Service{ - metadata = { - name = "alertmanager" - namespace = _common_namespace - } - spec = { - selector = { - "alertmanager" = _alertmanager.metadata.name - } - ports = [ - { - name = "web" - port = 9093 - targetPort = "web" - } - { - name = "reloader-web" - port = 8080 - targetPort = "reloader-web" - } - ] - sessionAffinity = "ClientIP" - } -} -``` - -:::tip -For complete configuration, please check source code file: [`prometheus-install/prod/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-install/prod/main.k). -::: - -This Alertmanager cluster is now fully functional and highly available, but no alerts are fired against it. Because you have not set up Prometheus yet. - -### Setup Prometheus - -Before you set up Prometheus, you must first create the RBAC rules for the Prometheus service account beforehand. - -1. ClusterRole: - -```py -_prometheus_clusterrole: rbac.ClusterRole { - metadata = { - name = "prometheus" - namespace = "default" - } - rules = [ - { - apiGroups = [""] - resources = ["nodes", "nodes/metrics", "services", "endpoints", "pods"] - verbs = ["get", "list", "watch"] - } - { - apiGroups = [""] - resources = ["configmaps"] - verbs = ["get"] - } - { - apiGroups = ["networking.k8s.io"] - resources = ["ingresses"] - verbs = ["get", "list", "watch"] - } - { - nonResourceURLs = ["/metrics"] - verbs = ["get"] - } - ] -} -``` - -:::tip -For full configuration of RBAC rules,please check source code file: [`prometheus-install/base/base.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-install/base/base.k). -::: - -2. Setting up a Prometheus object with 2 replicas which will send alerts to the Alertmanager cluster: - -```py -_prometheus: monitoringv1.Prometheus{ - metadata = { - name = "main" - namespace = "default" - } - spec = { - # specify service account, default sa has no permissions - serviceAccountName = "prometheus" - replicas = 2 - # ruleSelector is nil meaning that the operator picks up no rule - ruleSelector = { - matchLabels = { - "role" = "alert-rules" - "prometheus" = "main" - } - } - serviceMonitorSelector = { - matchLabels = { - "prometheus" = "main" - } - } - # intergating with alert manager by its service - alerting = { - alertmanagers = [ - { - name = _alertmanager_svc.metadata.name - namespace = _alertmanager_svc.metadata.namespace - port = _alertmanager_svc.spec.ports[0].name - } - ] - } - } -} -``` - -3. Lastly, for easy validation, expose the Prometheus admin API. -Creating a Kubernetes service listening target port `9090`: - -```py -_prometheus_svc: corev1.Service{ - metadata = { - name = "prometheus" - namespace = "default" - } - spec = { - selector = { - "prometheus" = _prometheus.metadata.name - } - ports = [ - { - name = "web" - port = 9090 - targetPort = "web" - } - { - name = "reloader-web" - port = 8080 - targetPort = "reloader-web" - } - ] - sessionAffinity = "ClientIP" - } -} -``` - -Prometheus Admin API allows access to delete series for a certain time range, clean up tombstones, capture snapshots, etc. -More information about the admin API can be found in [Prometheus official documentation](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis). 
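As a rough sketch, once the Service above is reachable you can exercise both the regular query API and the admin API from your workstation. The Service name `prometheus` and port `9090` come from the snippet above; the admin endpoint only responds if the admin API has been enabled on the Prometheus object.

```bash
# Forward the Prometheus web port defined in the Service above.
kubectl port-forward svc/prometheus 9090:9090 &

# Instant query via the regular HTTP API: list the "up" series of all scrape targets.
curl 'http://127.0.0.1:9090/api/v1/query?query=up'

# Admin API example: take a TSDB snapshot (requires the admin API to be enabled).
curl -XPOST 'http://127.0.0.1:9090/api/v1/admin/tsdb/snapshot'
```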
- -:::tip -For complete configuration, please check source code file: [`prometheus-install/prod/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-install/prod/main.k). -::: - -### One-click Setup - -Now you can deploy them with one click. Firstly, enter the stack dir of project `prometheus-install` in the konfig repo: - -```bash -cd konfig/base/examples/monitoring/prometheus-install/prod -``` - -Then, run `kusion apply`: - -```bash -kusion apply -``` - -The output is similar to: - -``` -✔︎ Compiling in stack prod... - -Stack: prod ID Action - * ├─ rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus Create - * ├─ monitoring.coreos.com/v1:Alertmanager:default:main Create - * ├─ monitoring.coreos.com/v1alpha1:AlertmanagerConfig:default:main Create - * ├─ monitoring.coreos.com/v1:Prometheus:default:main Create - * ├─ rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus Create - * ├─ v1:ServiceAccount:default:prometheus Create - * ├─ v1:Service:default:alertmanager Create - * └─ v1:Service:default:prometheus Create - -? Do you want to apply these diffs? [Use arrows to move, type to filter] - yes -> details - no -``` - -Choose `yes` to start deploying. After finishing deploying, run the following command, which will forward local port `30900` to k8s service port `9090`: - -```bash -kubectl port-forward svc/prometheus-example 30900:9090 -``` - -Now, you can open the Prometheus web interface, [http://127.0.0.1:30900](http://127.0.0.1:30900/), and go to the "Status > Runtime & Build Information" page and check that Prometheus has discovered 3 Alertmanager instances. - -![](/img/docs/user_docs/guides/prometheus/alertmanager.jpg) - -## PrometheusRule - -The PrometheusRule custom resource definition (CRD) declaratively defines desired Prometheus rules to be consumed by Prometheus instances, including alerting and recording rules. These rules are reconciled by the Operator and dynamically loaded without requiring any restart of Prometheus Rules. - -### Recording Rules - -Recording rules allow you to precompute frequently needed or computationally expensive expressions and save their result as a new set of time series. Querying the precomputed result will then often be much faster than executing the original expression every time it is needed. This is especially useful for dashboards, which need to query the same expression repeatedly every time they refresh. - -The following code snippet takes the node information as an example to the recording rules: - -```py -_sum_of_node_memory = """\ -sum( - node_memory_MemAvailable_bytes{job="node-exporter"} or - ( - node_memory_Buffers_bytes{job="node-exporter"} + - node_memory_Cached_bytes{job="node-exporter"} + - node_memory_MemFree_bytes{job="node-exporter"} + - node_memory_Slab_bytes{job="node-exporter"} - ) -) by (cluster) -""" - -_node_cpu = """\ -sum(rate(node_cpu_seconds_total{job="node-exporter",mode!="idle",mode!="iowait",mode!="steal"}[5m])) / -count(sum(node_cpu_seconds_total{job="node-exporter"}) by (cluster, instance, cpu)) -""" -``` - -`_sum_of_node_memory` records the sum of node available memory in bytes. - -`_node_cpu` calculates the average rate of increase of node CPU every 5 minutes. - -:::tip -For complete configuration, please check source code file: [`prometheus-rules/record/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-rules/record/main.k). -::: - -Now, you can create the recording rule above. 
- -1、Enter the `record` directory of project `prometheus-rules`: - -```bash -cd konfig/base/examples/monitoring/prometheus-rules/record -``` - -2、Apply these rules: - -```bash -kusion apply --yes -``` - -3、Check the Prometheus instance has loaded these rules: - -```bash -kubectl port-forward svc/prometheus-example 30900:9090 -``` - -Now, you can open the Prometheus web interface, [http://127.0.0.1:30900](http://127.0.0.1:30900/), and go to the "Status > Rules" page and check that Prometheus has loaded `node.rules`: - -![](/img/docs/user_docs/guides/prometheus/node-rules.jpg) - -#### Further Reading - -If you want to see the generating line graph from the [Recording Rules](#recording-rules) section, you need to deploy a `node-exporter` server in the default namespace. - -:::info -How to install node-exporter? Please check here: [`node-exporter.yaml`](https://github.com/KusionStack/examples/blob/main/prometheus/node-exporter.yaml) -::: - -Then, you will see, the sum of node memory in bytes: - -![](/img/docs/user_docs/guides/prometheus/node-memory.jpg) - -and the average rate of increase of node CPU every 5 minutes: - -![](/img/docs/user_docs/guides/prometheus/node-cpu.jpg) - -### Alerting Rules - -Alerting rules allow you to define alert conditions based on Prometheus expression language expressions and to send notifications about firing alerts to an external service. Whenever the alert expression results in one or more vector elements at a given point in time, the alert counts as active for these elements' label sets. - -The following code snippet is an example of alerting rules: - -```py -_alerts: monitoringv1.PrometheusRule { - metadata = { - name = "alert" - namespace = "default" - labels: { - "prometheus": "main", - "role": "alert-rules", - } - } - spec = { - groups = [ - { - name = "alert.rules" - rules = [ - { - alert: "WebhookAlert" - # vector(s scalar) returns the scalar s as a vector with no labels. - expr: "vector(1)" - } - ] - } - ] - } -} -``` - -Using internal function `vector(1)` will always return a vector 1, which means always triggering an alert. - -:::tip -For complete configuration, please check source code file: [`prometheus-rules/alert/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-rules/alert/main.k). 
-::: - -Now, you can apply the alerting rules: - -1、Enter the stack `alert` of project `prometheus-rules`: - -```bash -cd konfig/base/examples/monitoring/prometheus-rules/alert -``` - -2、Apply these rules: - -```bash -kusion apply --yes -``` - -3、Check the Prometheus instance has loaded these rules: - -Since you have already done the port forward step, you just need to refresh the "Status > Rules" page and check that Prometheus has loaded `alert.rules`: - -![](/img/docs/user_docs/guides/prometheus/alert-rules.jpg) - -4、Check the Alertmanager has received the alert successfully: - -```bash -kubectl port-forward svc/alertmanager-example 30903:9093 -``` - -Now, you can open the Alertmanager web interface, [http://127.0.0.1:30903](http://127.0.0.1:30903/) and see the example alert: - -![](/img/docs/user_docs/guides/prometheus/alert.jpg) - diff --git a/docs/user_docs/guides/sensitive-data-solution/_category_.json b/docs/user_docs/guides/sensitive-data-solution/_category_.json deleted file mode 100644 index 5cb5f54d..00000000 --- a/docs/user_docs/guides/sensitive-data-solution/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Manage Sensitive Information", - "position": 10 -} diff --git a/docs/user_docs/guides/sensitive-data-solution/index.md b/docs/user_docs/guides/sensitive-data-solution/index.md deleted file mode 100644 index 2ebe0160..00000000 --- a/docs/user_docs/guides/sensitive-data-solution/index.md +++ /dev/null @@ -1 +0,0 @@ -# Information Security \ No newline at end of file diff --git a/docs/user_docs/guides/sensitive-data-solution/vault-agent.md b/docs/user_docs/guides/sensitive-data-solution/vault-agent.md deleted file mode 100644 index 6b35be7f..00000000 --- a/docs/user_docs/guides/sensitive-data-solution/vault-agent.md +++ /dev/null @@ -1,318 +0,0 @@ -# Vault Agent - -This guide will show you that KCL/Kusion solves the secret management problem by integrating Vault. -We will pass the database username and password into the Pod, involving 3 Kubernetes resources: - -- Namespace -- Deployment -- ServiceAccount - -:::tip - -This guide requires you to have a basic understanding of Kubernetes. -If you are not familiar with the relevant concepts, please refer to the links below: -- [Learn Kubernetes Basics](https://Kubernetes.io/docs/tutorials/Kubernetes-basics/) -- [Namespace](https://Kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) -- [Deployment](https://Kubernetes.io/docs/concepts/workloads/controllers/deployment/) -- [ServiceAccount](https://Kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -::: - -## Prerequisites - -Before we start, we need to complete the following steps: - -1、Install Kusion - -We recommend using the official installation tool _kusionup_ which supports multi-version management. -See [Download and Install](/docs/user_docs/getting-started/install) for more details. - -2、Clone Konfig repo - -In this guide, we need some KCL models that [Konfig](https://github.com/KusionStack/konfig.git) offers. -For more details on KCL language, please refer to [Tour of KCL](/docs/reference/lang/lang/tour). - -3、Running Kubernetes cluster - -There must be a running Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. -If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. - -4、Available Helm CLI - -The Helm tool is used to deploy the Vault Server and Agent Injector. 
-If you haven't installed Helm, please refer to [Install Helm](https://helm.sh/docs/intro/install/). - -## Install Vault - -We recommend deploying the vault server and agent on Kubernetes by _Helm Chart_. -[Helm](https://helm.sh/docs/helm/) is a package manager, -it can install and configure Vault and its related components in different modes. -Helm chart implements conditionalization and parameterization of templates. -These parameters can be set via command line arguments or defined in YAML files. - -1、Add HashiCorp helm repo: -```bash -helm repo add hashicorp https://helm.releases.hashicorp.com -``` - -2、Update to cache HashiCorp's latest version: -```bash -helm repo update -``` - -3、Install Vault server and agent, and start in development mode: -```bash -helm install vault hashicorp/vault --set "server.dev.enabled=true" -``` - -`server.dev.enabled=true` indicates that Vault is started in developer mode on a single pod. - -4、Check all pods in the default namespace: -```bash -kubectl get pod -``` - -The output is similar to: -``` -NAME READY STATUS RESTARTS AGE -vault-0 1/1 Running 0 2d1h -vault-agent-injector-58b6d499-k9x9r 1/1 Running 0 2d1h -``` - -Pod `vault-0` is the Vault server running in **dev** mode, -pod `vault-agent-injector-58b6d499-k9x9r` is an agent that injects data according to `metadata.annotations`. - -:::caution - -To simplify the demonstration, start the Vault server in **dev** mode. -In this mode, the vault server will automatically initialize and unseal. -**DO NOT** use it in a production environment. -::: - -## Configure Vault - -Vault stores secrets in its database, and users need to configure the relevant confidential data and enable Vault's Kubernetes authentication. - -### Create a Secret - -We must enable the k/v engine of Vault, and save the secret data(username and password of database) in it. -Then, in the [Create Annotated Pods](#create-annotated-pods) section, the database username and password will be injected into the pod. - -1、Start an interactive shell session on the `vault-0` pod: -```bash -kubectl exec -it vault-0 -- /bin/sh -``` - -2、Enable the k/v engine at the path `path=internal` -```bash -vault secrets enable -path=internal kv-v2 -``` - -The output is similar to: -```bash -Success! Enabled the kv-v2 secrets engine at: internal/ -``` - -:::tip - -For more detail on the k/v secrets engine, see [Static Secrets: Key/Value Secret](https://learn.hashicorp.com/tutorials/vault/static-secrets). -::: - -3、Create a secret at the path `internal/database/config` with username and password: -```bash -vault kv put internal/database/config username="db-readonly-username" password="db-secret-password" -``` - -The output is similar to: -``` -Key Value ---- ----- -created_time 2022-03-13T08:40:02.1133715Z -deletion_time n/a -destroyed false -version 1 -``` - -4、Verify that the secret is readable at the path `internal/database/config`: -```bash -vault kv get internal/database/config -``` - -The output is similar to: -``` -======= Metadata ======= -Key Value ---- ----- -created_time 2022-03-13T08:40:02.1133715Z -custom_metadata -deletion_time n/a -destroyed false -version 1 - -====== Data ====== -Key Value ---- ----- -password db-secret-password -username db-readonly-username -``` - -Now the confidential data is created, please don't exit the Pod. - -### Enable Kubernetes Authentication - -Vault provides a Kubernetes authentication method that enables clients to authenticate with a Kubernetes ServiceAccount Token. 
-The Kubernetes resources that access the secret and create the volume authenticate through this method through a `role`. - -1、Continue with the terminal in the previous step, and enable the Kubernetes authentication method: -```bash -vault auth enable kubernetes -``` - -The output is similar to: -``` -Success! Enabled Kubernetes auth method at: Kubernetes/ -``` - -2、Configure authentication rules, depending on the Kubernetes API address, ServiceAccount token, certificate, and the issuer of the Kubernetes ServiceAccount(required for Kubernetes 1.21+): -```bash -vault write auth/Kubernetes/config \ - Kubernetes_host="https://$Kubernetes_PORT_443_TCP_ADDR:443" \ - token_reviewer_jwt="$(cat /var/run/secrets/Kubernetes.io/serviceaccount/token)" \ - Kubernetes_ca_cert=@/var/run/secrets/Kubernetes.io/serviceaccount/ca.crt \ - issuer="https://Kubernetes.default.svc.cluster.local" -``` - -The output is similar to: -``` -Success! Data written to: auth/Kubernetes/config -``` - -When Kubernetes creates pods, mount `token_reviewer_jwt` and `Kubernetes_ca_cert` into them. -The environment variable `KUBERNETES_PORT_443_TCP_ADDR` references the internal network address of the Kubernetes host. - -3、Create a policy named `kcl-vault-agent-agent-policy`: -```bash -vault policy write kcl-vault-agent-agent-policy - < deletion_time: destroyed:false version:1] -``` - -You can see the unformatted database username and password, which are configured in the [Create a secret](#create-a-secret) section. - -#### Formatted Output - -Unformatted data is unreasonable and not read directly for applications. -Regarding formatting, Vault also provides some [template instructions](https://www.vaultproject.io/docs/agent/template). -In this example, you only need to uncomment the code of `main.k` and re-apply the configurations. - -The following shows commented code in `main.k`: -```py -podMetadata = apis.ObjectMeta { - annotations = { - "vault.hashicorp.com/agent-inject" = "true" - "vault.hashicorp.com/role" = "kcl-vault-agent-role" - "vault.hashicorp.com/agent-inject-secret-database-config.txt" = "internal/data/database/config" - "vault.hashicorp.com/agent-inject-status" = "update" - "vault.hashicorp.com/agent-inject-template-database-config.txt" = """\ -{{- with secret "internal/data/database/config" -}} -postgresql://{{ .Data.data.username }}:{{ .Data.data.password }}@postgres:5432/wizard -{{- end -}}""" -``` - -Apply again: -```bash -kusion apply --yes=true -``` - -Check the secret data after the `Deployment` rolling update is finished: -```bash -kubectl exec -n kcl-vault-agent \ - $(kubectl get pod -n kcl-vault-agent -l app=kcl-vault-agent-test -o jsonpath="{.items[0].metadata.name}") \ - --container kcl-vault-agent-test -- cat /vault/secrets/database-config.txt -``` - -The output is similar to: -``` -postgresql://db-readonly-username:db-secret-password@postgres:5432/wizard -``` -As you can see, the confidential data is injected successfully and the result is rendered in the format specified by annotation. - -At this point, we have completed the KCL/Kusion integration Vault agent to realize secret management. 
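-
-For reference, the `podMetadata` declared in `main.k` above is expected to render into annotations on the Deployment's pod template, which is what the Vault Agent Injector actually reads. The following is a minimal illustrative sketch of that fragment (not verbatim `kusion compile` output):
-
-```yaml
-# Illustrative fragment only: the annotation keys and values come from the
-# podMetadata snippet above; the surrounding structure is the standard
-# Kubernetes Deployment pod-template shape, not actual compiled output.
-spec:
-  template:
-    metadata:
-      annotations:
-        vault.hashicorp.com/agent-inject: "true"
-        vault.hashicorp.com/role: "kcl-vault-agent-role"
-        vault.hashicorp.com/agent-inject-secret-database-config.txt: "internal/data/database/config"
-        vault.hashicorp.com/agent-inject-status: "update"
-        vault.hashicorp.com/agent-inject-template-database-config.txt: |
-          {{- with secret "internal/data/database/config" -}}
-          postgresql://{{ .Data.data.username }}:{{ .Data.data.password }}@postgres:5432/wizard
-          {{- end -}}
-```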
- -## What's Next - -- Learn about secret management with [Vault CSI Provider](/docs/user_docs/guides/sensitive-data-solution/vault-csi-provider) \ No newline at end of file diff --git a/docs/user_docs/guides/sensitive-data-solution/vault-csi-provider.md b/docs/user_docs/guides/sensitive-data-solution/vault-csi-provider.md deleted file mode 100644 index fbc54cf1..00000000 --- a/docs/user_docs/guides/sensitive-data-solution/vault-csi-provider.md +++ /dev/null @@ -1,301 +0,0 @@ -# Vault CSI Provider - -This guide will show you that KCL/Kusion solves the secret management problem by integrating Vault CSI Provider. -We will pass the database username and password into the Pod, involving 3 kubernetes built-in resources and 1 custom resource: - -- Namespace -- Deployment -- ServiceAccount -- SecretProviderClass - -:::tip - -This guide requires you to have a basic understanding of Kubernetes. -If you are not familiar with the relevant concepts, please refer to the links below: -- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) -- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) -- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) -- [ServiceAccount](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -- [SecretProviderClass](https://secrets-store-csi-driver.sigs.k8s.io/concepts.html#custom-resource-definitions-crds) -::: - -## Prerequisites - -Before we start, we need to complete the following steps first: - -1、Install Kusion - -We recommend using the official installation tool _kusionup_ which supports multi-version management. -See [Download and Install](/docs/user_docs/getting-started/install) for more details. - -2、Clone Konfig repo - -In this guide, we need some KCL models that [Konfig](https://github.com/KusionStack/konfig.git) offers. -For more details on KCL language, please refer to [Tour of KCL](/docs/reference/lang/lang/tour). - -3、Running Kubernetes cluster - -There must be a running Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. -If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. - -4、Available Helm CLI - -The Helm tool is used to deploy the Vault server and CSI driver. -If you haven't installed Helm, please refer to [Install Helm](https://helm.sh/docs/intro/install/). - -## Install Vault server and CSI driver - -We recommend deploying the Vault server and CSI driver on Kubernetes by _Helm Chart_. -[Helm](https://helm.sh/docs/helm/) is a package manager, -which can install and configure Vault and its related components in different modes. -Helm chart implements conditionalization and parameterization of templates. -These parameters can be set via command line arguments or defined in YAML files. - -### Install Vault server - -1、Add HashiCorp helm repository: -```bash -helm repo add hashicorp https://helm.releases.hashicorp.com -``` - -2、Update to cache HashiCorp's latest version: -```bash -helm repo update -``` - -3、Install Vault server, start in development mode, disable Injector and enable CSI: -```bash -helm install vault hashicorp/vault \ - --set "server.dev.enabled=true" \ - --set "injector.enabled=false" \ - --set "csi.enabled=true" -``` -`server.dev.enabled=true` indicates that Vault is started in developer mode on a single pod. 
-`injector.enabled=false` indicates that the Injector service is disabled; -`csi.enabled=true` Indicates that the Vault CSI Pod is enabled. -If you already have Vault installed, you can use the `helm upgrade` command to update Vault's deployment mode. - -4、Check all pods in the default namespace: -```bash -kubectl get pod -NAME READY STATUS RESTARTS AGE -vault-0 1/1 Running 0 17m -vault-csi-provider-456hl 1/1 Running 0 17m -``` - -Wait until the status of `vault-0` is `Running` and ready (`1/1`) before continuing with this guide. - -### Install CSI driver - -[Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/introduction.html) -`secrets-store.csi.k8s.io` allows Kubernetes to mount multiple secrets, keys, -and certs stored in enterprise-grade external secrets stores into their pods as a volume. -Once the volume is attached, the data in it is mounted into the container’s file system. - -:::tip - -The [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md) -is a standard for exposing arbitrary block and file storage systems -to containerized workloads on Container Orchestration Systems (COs) like Kubernetes. -Using CSI third-party storage providers can write and deploy plugins exposing new storage systems in Kubernetes -without ever having to touch the core Kubernetes code. -::: - -1、Add CSI driver helm repository: -```bash -helm repo add secrets-store-csi-driver https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts -``` - -2、Install Kubernetes-Secrets-Store-CSI-Driver: -```bash -helm install csi secrets-store-csi-driver/secrets-store-csi-driver --namespace kube-system -``` - -3、Check CSI driver pods: -```bash -kubectl --namespace=kube-system get pods -l "app=secrets-store-csi-driver" -NAME READY STATUS RESTARTS AGE -csi-secrets-store-csi-driver-2wl2f 3/3 Running 0 2m -``` - -Wait until the status of pod `csi-secrets-store-csi-driver-2wl2f` is `Running` and is ready (`3/3`) before continuing with this guide. - -## Configure Vault - -Vault stores confidential data in its database, and users need to configure the relevant confidential data and enable Vault's Kubernetes authentication. - -### Create a secret - -In [Create a pod with a secret mounted](#create-a-pod-with-a-secret-mounted) section, -the volume mounted in Pod expects secret stored at path `secret/data/db-pass`. -When Vault is run in development a K/V secret engine is enabled at the path `/secret`. - -1、start an interactive shell session on the `vault-0` pod: -```bash -kubectl exec -it vault-0 -- /bin/sh -``` - -2、Create a secret at the path `secret/db-pass` with a password: -```bash -vault kv put secret/db-pass password="db-secret-password" -``` - -The output is similar to: -``` -Key Value ---- ----- -created_time 2022-03-17T07:45:06.3767973Z -custom_metadata -deletion_time n/a -destroyed false -version 1 -``` - -3、Verify that the secret is readable at the path `secret/db-pass`. -```bash -vault kv get secret/db-pass -``` - -The output is similar to: -``` -======= Metadata ======= -Key Value ---- ----- -created_time 2022-03-17T07:45:06.3767973Z -custom_metadata -deletion_time n/a -destroyed false -version 1 - -====== Data ====== -Key Value ---- ----- -password db-secret-password -``` -For now, the confidential data is created, please don't exit the vault pod immediately. - -### Enable Kubernetes authentication - -Vault provides a Kubernetes authentication method that enables clients to authenticate with a Kubernetes ServiceAccount Token. 
-The Kubernetes resources that access the secret and create the volume authenticate through this method through a `role`. - -1、Continue with the terminal in the previous step, and enable the Kubernetes authentication method: -```bash -vault auth enable kubernetes -``` - -The output is similar to: -``` -Success! Enabled kubernetes auth method at: kubernetes/ -``` - -2、Configure authentication rules, depending on the Kubernetes API address, ServiceAccount token, certificate, and the issuer of the Kubernetes ServiceAccount(required for Kubernetes 1.21+): -```bash -vault write auth/kubernetes/config \ - kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443" \ - token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ - kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ - issuer="https://kubernetes.default.svc.cluster.local" -``` - -The output is similar to: -``` -Success! Data written to: auth/kubernetes/config -``` - -When Kubernetes creates pods, mount `token_reviewer_jwt` and `Kubernetes_ca_cert` into them. -The environment variable `KUBERNETES_PORT_443_TCP_ADDR` references the internal network address of the Kubernetes host. - -3、Create a policy named `kcl-vault-csi-policy`: -```bash -vault policy write kcl-vault-csi-policy - <. -Press ^C at any time to quit. - -✔ project name: deployment-single-stack -✔ project description: A minimal kusion project of single stack -✔ Stack: dev -✔ ClusterName: kubernetes-dev -✔ Image: gcr.io/google-samples/gb-frontend:v4 -Created project 'deployment-single-stack' -``` - -Now, we have successfully initialized a KCL project `deployment-single-stack`, which contains a `dev` stack. -`project name` and `project description` are provided by KCL template, and another three fields require users to fill in. -`Stack` represents the name of a configuration set, which is used to isolate with other stacks. -`ClusterName` represents the cluster name, it will be recorded into `metadata.annotations`. -`Image` represents the image address of the app's main container. - -:::info - -See [Project&Stack](/user_docs/concepts/konfig.md) for more details about Project and Stack. -::: - -The directory structure is as follows: - -``` -deployment-single-stack -├── README.md -├── base -│ └── base.k -├── dev -│ ├── ci-test -│ │ └── settings.yaml -│ ├── kcl.yaml -│ ├── main.k -│ └── stack.yaml -├── kusion.yaml -└── project.yaml - -3 directories, 8 files -``` - -It can be seen that the project has three levels of directories, and each level has its design significance. - -First level: -- `project.yaml` represents project-level properties. -- `kusion.yaml` is the template configuration file, which is not relevant to the operation of this guide. - -Second level: -- `base` directory stores common configurations for all stacks. -- `dev` directory stores the customized configuration: - - `dev/kcl.yaml` stores static compilation configuration. - - `dev/main.k` stores specific configurations of `dev` stack. - - `dev/stack.yaml` stores stack information. - -Third level: -- `dev/ci-test` directory stores the dynamic compilation configuration and final output. - -By default, the compilation output goes to the `stdout.golden.yaml` file in this directory. -In general, the `.k` file is the KCL source code, and the `.yaml` is the configuration file. - -## Compiling - -At this point, the development of the project has been completed with the help of the built-in template provided by Kusion. 
-The project is written in KCL, not the JSON/YAML that Kubernetes recognizes, so it needs to be compiled to produce the final output.
-
-Enter the stack directory `deployment-single-stack/dev` and compile:
-
-```bash
-cd deployment-single-stack/dev && kusion compile
-```
-
-The output is saved in the `deployment-single-stack/dev/ci-test/stdout.golden.yaml` file by default.
-
-:::tip
-
-For instructions on the kusion command line tool, execute `kusion -h`, or refer to the tool's online [documentation](/docs/reference/cli/kusionctl/overview).
-:::
-
-## Applying
-
-Compilation is complete, and now you can apply the configuration. In the `stdout.golden.yaml` file, you can see 3 resources:
-
-- a Deployment named `deployment-single-stackdev`
-- a Namespace named `deployment-single-stack`
-- a Service named `frontend-service`
-
-The content of this file is already a configuration that Kubernetes can recognize.
-You can apply it directly with `kubectl apply -f stdout.golden.yaml`, or run `kusion apply`, which compiles and applies the configuration in one step.
-
-:::tip
-
-It is recommended to use the Kusion CLI. The compilation output in this example is the complete YAML declaration,
-but not all KCL project compilation results are the same.
-:::
-
-Execute the command:
-
-```bash
-kusion apply
-```
-
-The output is similar to:
-
-```
- SUCCESS  Compiling in stack dev...
-
-Stack: dev    Provider      Type                  Name                             Plan
- * ├─         kubernetes    v1:Namespace          deployment-single-stack[0]       Create
- * ├─         kubernetes    apps/v1:Deployment    deployment-single-stackdev[0]    Create
- * └─         kubernetes    v1:Service            frontend-service[0]              Create
-
-✔ yes
-Start applying diffs......
- SUCCESS  Creating Namespace/deployment-single-stack
- SUCCESS  Creating Deployment/deployment-single-stackdev
- SUCCESS  Creating Service/frontend-service
-Creating Service/frontend-service [3/3] ███████████████████████████████████████████ 100% | 0s
-
-Apply complete! Resources: 3 created, 0 updated, 0 deleted.
-```
-
-After the configuration is applied successfully, you can use `kubectl` to check the actual status of these resources.
-
-1、Check Namespace
-
-```bash
-kubectl get ns
-```
-
-The output is similar to:
-
-```
-NAME                      STATUS   AGE
-argocd                    Active   59d
-default                   Active   72d
-deployment-single-stack   Active   10m
-```
-
-2、Check Deployment
-
-```bash
-kubectl get deploy -n deployment-single-stack
-```
-
-The output is similar to:
-
-```
-NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
-deployment-single-stackdev   1/1     1            1           11m
-```
-
-3、Check Service
-
-```bash
-kubectl get svc -n deployment-single-stack
-```
-
-The output is similar to:
-
-```
-NAME               TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
-frontend-service   NodePort   10.0.0.0     <none>        80:10001/TCP   11m
-```
-
-4、Validate app
-
-Using the `kubectl` tool, forward the local port `30000` to the service port `80`.
- -```bash -kubectl port-forward svc/frontend-service -n deployment-single-stack-xx 30000:80 -``` - -Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000): - -![](/img/docs/user_docs/guides/working-with-k8s/app-preview.jpg) diff --git a/docs/user_docs/guides/working-with-k8s/2-container.md b/docs/user_docs/guides/working-with-k8s/2-container.md deleted file mode 100644 index 7946f628..00000000 --- a/docs/user_docs/guides/working-with-k8s/2-container.md +++ /dev/null @@ -1,31 +0,0 @@ -# Configure Main Container - -The attribute `mainContainer` of `the Server` model is used to declare the main container configuration of the application. -For the abstract definition of the main container, please see [here](/docs/reference/model/kusion_models/kube/frontend/container/doc_container) for more details. - -## Prerequisites - -Please refer to the [prerequisites](/docs/user_docs/guides/working-with-k8s/deploy-server#prerequisites) in the guide for deploying an application. - -## Example - -```py -appConfiguration: frontend.Server { - # main container - mainContainer = container.Main { - # container name - name = "main" - # container envs - env = [ - { - name = "HOST_NAME" - value = "example.com" - } - ] - # container ports - ports = [ - { containerPort = 80 } - ] - } -} -``` diff --git a/docs/user_docs/guides/working-with-k8s/3-monitoring.md b/docs/user_docs/guides/working-with-k8s/3-monitoring.md deleted file mode 100644 index 3c555f88..00000000 --- a/docs/user_docs/guides/working-with-k8s/3-monitoring.md +++ /dev/null @@ -1,85 +0,0 @@ -# Enable Monitoring - -This guide shows you how to use the KCL language and its corresponding CLI tool Kusion to complete the monitoring and deployment of a Kubernetes application Prometheus. -The demo sample is mainly composed of the following components: - -- Namespace -- Deployment -- PodMonitor - -:::tip - -This guide requires you to have a basic understanding of Kubernetes and Prometheus. -If you are not familiar with the relevant concepts, please refer to the links below: - -- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) -- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/) -::: - -## Prerequisites - -Before starting, in addition to referring [here](/docs/user_docs/guides/working-with-k8s/deploy-server#prerequisites), -the following preparations need to be completed: - -- Deploy Prometheus Operator in your cluster - -Follow the steps in [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) to deploy the Prometheus Operator in your cluster - -## Example - -Enable monitoring by setting `enableMonitoring` to `True`, and add the business container port number configuration `8080` - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.container -import base.pkg.kusion_models.kube.frontend.container.env as e -import base.pkg.kusion_models.kube.frontend.container.port as cp -import base.pkg.kusion_models.kube.frontend.container.probe as p - -# The application configuration in stack will overwrite -# the configuration with the same attribute in base. 
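-# Note: `enableMonitoring = True` near the end of this block is what tells Kusion to
-# render the PodMonitor resource alongside the Deployment and Namespace, and the
-# container port named "web" (8080) is the port the PodMonitor is expected to point
-# Prometheus at.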
-appConfiguration: frontend.Server { - # Main container configuration - mainContainer: container.Main { - name = "prometheus-example-app" - ports = [ - cp.ContainerPort { - name = "web" - containerPort = 8080 - } - ] - } - enableMonitoring = True -} -``` - -## Applying - -Run the following command: - -```bash -kusion apply -``` - -The output is similar to: - -``` - SUCCESS Compiling in stack prod... - -Stack: prod Provider Type Name Plan - * ├─ kubernetes v1:Namespace prometheus-example-app[0] Create - * ├─ kubernetes monitoring.coreos.com/v1:PodMonitor prometheus-example-appprod[0] Create - * └─ kubernetes apps/v1:Deployment prometheus-example-appprod[0] Create -``` - -## Validate Result - -We can see that in addition to deploying k8s `Deployment` and `Namespace` resources, -`PodMonitor` is also deployed to configure Prometheus to monitor target pods. -After the resources are created, you can use the following commands to check the Prometheus monitoring panel. - -``` -kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090 -``` - -Finally, access the monitoring panel via [http://localhost:9090](http://localhost:9090) and see the monitoring metrics of the application. diff --git a/docs/user_docs/guides/working-with-k8s/4-service.md b/docs/user_docs/guides/working-with-k8s/4-service.md deleted file mode 100644 index fb18624b..00000000 --- a/docs/user_docs/guides/working-with-k8s/4-service.md +++ /dev/null @@ -1,52 +0,0 @@ -# Expose Service - -The attribute `services` of the `Server` model is used to declare your app's network configuration. -For the abstract definition of `service`, please see [here](/docs/reference/model/kusion_models/kube/frontend/service/doc_service) for more details. - -## Prerequisites - -Please refer to the [prerequisites](/docs/user_docs/guides/working-with-k8s/deploy-server#prerequisites) in the guide for deploying an application. - -## Example - -Add the Service configuration in `dev/main.k` or `base/base.k` of the sample code: - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.service - -appConfiguration: frontend.Server { - # 添加 Service 配置 - services = [ - service.Service { - name = "app" - type = "NodePort" - ports = [ - { - "port" = 80 - } - ] - } - ] -} -``` - -The code above is a sample configuration, you can add custom configuration according to the actual situation. - -## Applying - -Re-run steps in [Applying](/docs/user_docs/guides/working-with-k8s/deploy-server#applying), new service configuration can be applied. - -``` -$ kusion apply -SUCCESS Compiling in stack dev... - -Stack: dev Provider Type Name Plan - * ├─ kubernetes v1:Namespace demo UnChange - * ├─ kubernetes v1:Service demo-service Update - * └─ kubernetes apps/v1:Deployment demodev UnChange - -✔ yes -SUCCESS Updating v1:Service -Updating v1:Service [1/1] ████████████████████████████████ 100% | 0s -``` diff --git a/docs/user_docs/guides/working-with-k8s/5-image-upgrade.md b/docs/user_docs/guides/working-with-k8s/5-image-upgrade.md deleted file mode 100644 index ed93cbb4..00000000 --- a/docs/user_docs/guides/working-with-k8s/5-image-upgrade.md +++ /dev/null @@ -1,41 +0,0 @@ -# Upgrade Image - -The attribute `image` of the `Server` model is used to declare your app's business container image. -For the definition of `image`, please see [here](/docs/reference/model/kusion_models/kube/frontend/doc_server) for more details. 
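-
-Conceptually, the `image` value flows into the container image field of the rendered Deployment. The following is a minimal illustrative sketch of that fragment, assuming the standard Kubernetes Deployment shape and the container name `main` from the main container guide (it is not verbatim compiled output):
-
-```yaml
-# Illustrative fragment only: shows where the `image` attribute lands in the
-# rendered Deployment. The container name `main` is an assumption; only the
-# image value comes from this guide (the Example below upgrades it to v5).
-spec:
-  template:
-    spec:
-      containers:
-        - name: main
-          image: gcr.io/google-samples/gb-frontend:v4
-```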
- -## Prerequisites - -Please refer to the [prerequisites](/docs/user_docs/guides/working-with-k8s/deploy-server#prerequisites) in the guide for deploying an application. - -## Example - -Re-assign the image value in `dev/main.k`: - -```py -import base.pkg.kusion_models.kube.frontend - -appConfiguration: frontend.Server { - # set image to your want - # before: image = "gcr.io/google-samples/gb-frontend:v4" - # after: - image = "gcr.io/google-samples/gb-frontend:v5" -} -``` - -## Applying - -Re-run steps in [Applying](/docs/user_docs/guides/working-with-k8s/deploy-server#applying), update image is completed. - -``` -$ kusion apply -SUCCESS Compiling in stack dev... - -Stack: dev Provider Type Name Plan - * ├─ kubernetes v1:Namespace demo UnChange - * ├─ kubernetes v1:Service demo-service UnChange - * └─ kubernetes apps/v1:Deployment demodev Update - -✔ yes -SUCCESS Updating apps/v1:Deployment -Updating apps/v1:Deployment [1/1] ████████████████████████████████ 100% | 0s -``` diff --git a/docs/user_docs/guides/working-with-k8s/6-resource-spec.md b/docs/user_docs/guides/working-with-k8s/6-resource-spec.md deleted file mode 100644 index 978ebc81..00000000 --- a/docs/user_docs/guides/working-with-k8s/6-resource-spec.md +++ /dev/null @@ -1,79 +0,0 @@ -# Configure Resource Specification - -The attribute `schedulingStrategy` of the `Server` model is used to declare the resource spec of an application's business container. -About the definition of resource spec, please see [here](/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource) for more details. - -## Prerequisites - -Please refer to the [prerequisites](/docs/user_docs/guides/working-with-k8s/deploy-server#prerequisites) in the guide for deploying an application. - -## Example - -Re-assign the `schedulingStrategy.resource` value. - -There are two ways to modify the resource spec, one is to modify the values of `cpu` and `memory` in the resource expression: - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.resource as res - -appConfiguration: frontend.Server { - # modify the values of cpu and memory in the resource expression - # before: schedulingStrategy.resource = "cpu=100m,memory=100Mi,disk=1Gi" - # after(scale up): - schedulingStrategy.resource = res.Resource { - cpu = 500m - memory = 500Mi - disk = 1Gi - } -} -``` - -The other is to use the preset resource value to replace the original value to expand the application: - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.templates.resource as res_tpl - -appConfiguration: frontend.Server { - # use the preset resource value - # before: schedulingStrategy.resource = "cpu=100m,memory=100Mi,disk=1Gi" - # after(scale up): - schedulingStrategy.resource = res_tpl.large -} -``` - -The code above is a sample configuration, you can add custom configurations according to the actual situation: - -```py -import base.pkg.kusion_models.kube.frontend.resource as res - -schema SchedulingStrategy: - """ SchedulingStrategy represents scheduling strategy. - - Attributes - ---------- - resource: str | res.Resource, default is "1 Note: All KCL variables can be assigned the null value `None` and the undefined value `Undefined`. - -## 3. What do some KCL variable names prefixed with `_` underscore mean? What's the difference between without the `_` underscore prefix? In what scenarios are they suitable for use? 
- -A variable with an underscore prefix in KCL represents a **hidden**, **mutable** variable, **hidden** means a variable with an underscore prefix will not be output to YAML, and **mutable** means that a variable with an underscore prefix can be repeatedly assigned multiple times, and a variable without an underscore prefix is immutable after being assigned. - -```python -name = 'Foo' # Exported and immutable variable -name = 'Bar' # Error: An exported variable can only be assigned a value once -``` - -```python -_name = 'Foo' # Hidden and mutable variable -_name = 'Bar' - -schema Person: - _name: str # hidden and mutable -``` - -## 4. How to add elements to a dict? - -We can use the union operator `|` or the dict unpacking operator `**` to add elements into a dict, and we can use `in` and `not in` operators to determine whether the dict variable contains a certain key. - -```python -_left = {key = {key1 = "value1"}, intKey = 1} # Note: `=` denotes override the value. -_right = {key = {key2 = "value2"}, intKey = 2} -dataUnion = _left | _right # {"key": {"key1": "value1", "key2": "value2"}, "intKey": 2} -dataUnpack = {**_left, **_right} # {"key": {"key1": "value1", "key2": "value2"}, "intKey": 2} -``` - -The output YAML is - -```yaml -dataUnion: - key: - key1: value1 - key2: value2 -dataUnpack: - key: - key2: value2 -``` - -It is also possible to add key-value pair to a dict using the `string interpolation` or the string `format` method. - -```python -dictKey1 = "key1" -dictKey2 = "key2" -data = { - "${dictKey1}" = "value1" - "{}".format(dictKey2) = "value2" -} -``` - -The output YAML is - -```yaml -dictKey1: key1 -dictKey2: key2 -data: - key1: value1 - key2: value2 -``` - -## 5. How to modify elements in dict? - -We can use the union operator `|`, or the unpacking operator `**` to modify the elements in the dict - -```python -_data = {key = "value"} # {"key": "value"} -_data = _data | {key = "override_value1"} # {"key": "override_value1"} -_data = {**_data, **{key = "override_value2"}} # {"key": "override_value2"} -``` - -If we want to delete a value with a key of `key` in the dict, we can use the unpacking operator `**{key = Undefined}` or the merge operator `| {key = Undefined}` to overwrite, the value of the key is Undefined after overwriting, and no YAML output will be done. - -## 6. How to add elements to list? - -There are two ways to add elements to a list: - -- Use `+`, `+=` and slice to concatenate list variables to add elements to the list - -```python -_args = ["a", "b", "c"] -_args += ["end"] # Add elements "end" to the end of the list: ["a", "b", "c", "end"] -_args = _args[:2] + ["x"] + _args[2:] # Insert element "x" at list index 2: ["a", "b", "x", "c", "end"] -_args = ["start"] + _args # Add elements "start" to the head of the list: ["start", "a", "b", "x", "c", "end"] -``` - -- Use the `*` unpacking operator to concatenate and merge lists - -```python -_args = ["a", "b", "c"] -_args = [*_args, "end"] # Add elements "end" to the end of the list: ["a", "b", "c", "end"] -_args = ["start", *_args] # Add elements "start" to the head of the list: ["start", "a", "b", "x", "c", "end"] -``` - -> Note: When the consecutive variables are `None/Undefined`, using `+` may cause an error, then we can use the list unpacking operator `*` or use the `or` operator to take the default value of the list to avoid null values judge. 
- -```python -data1 = [1, 2, 3] -data2 = None -data3 = [*data1, *data2] # Ok: [1, 2, 3] -data4 = data1 + data2 or [] # OK: [1, 2, 3], We can use the `or` operator to take the default value of data2 as [], when data2 is None/Undefined, take the empty list [] for calculation. -data5 = data1 + data2 # Error: can only concatenate list (not "NoneType") to list -``` - -## 7. How to modify/delete elements in list? - -There are two ways to modify the elements in the list: - -- Use slice to directly modify the value at an index of a list - -```python -_index = 1 -_args = ["a", "b", "c"] -_args = _args[:index] + ["x"] + _args[index+1:] # Modify the element of list index 1 to "x": ["a", "x", "c"] -``` - -- Use the list comprehension to modify elements in a list - -```python -_args = ["a", "b", "c"] -_args = ["x" if a == "b" else a for a in _args] # Change the value of "b" in the list to "x": ["a", "x", "c"] -``` - -There are two ways to delete elements in a list: - -- Use the list comprehension to delete elements with the `if` condition expressions. -- Use `filter` expression to filter elements. - -For example, if we want to delete a number greater than 2 in a list `[1, 2, 3, 4, 5]`, we can write as follows: - -```python -originList = [1, 2, 3, 4, 5] -oneWayDeleteListItem = [item for item in originList if item <= 2] -anotherWayDeleteListItem = filter item in originList { - item <= 2 -} -``` - -The output YAML is - -```yaml -originList: -- 1 -- 2 -- 3 -- 4 -- 5 -oneWayDeleteListItem: -- 1 -- 2 -anotherWayDeleteListItem: -- 1 -- 2 -``` - -## 8. How to write a for loop in KCL? How to understand and use list comprehension and dict comprehension? - -KCL currently only supports functional/declarative deductive for loops. We can traverse dict and list variables as follows: - -The specific form of a list comprehension is (where `[]` are used on both sides of the comprehension): - -```txt -[expression for expr in sequence1 - if condition1 - for expr2 in sequence2 - if condition2 - for expr3 in sequence3 ... - if condition3 - for exprN in sequenceN - if conditionN] -``` - -The specific form of dict comprehension is (where `{}` are used on both sides of the comprehension): - -```txt -{expression for expr in sequence1 - if condition1 - for expr2 in sequence2 - if condition2 - for expr3 in sequence3 ... - if condition3 - for exprN in sequenceN - if conditionN} -``` - -The `if` in the above forms represents the filter condition, and the expression `expr` that satisfies the condition will be generated into a new list or dict - -List comprehension example: - -```python -_listData = [1, 2, 3, 4, 5, 6] -_listData = [l * 2 for l in _listData] # All elements in _listData are multiplied by 2: [2, 4, 6, 8, 10, 12] -_listData = [l for l in _listData if l % 4 == 0] # Filter out all elements in _listData that are divisible by 4: [4, 8, 12] -_listData = [l + 100 if l % 8 == 0 else l for l in _listData] # Traverse _listData, when the element in it is divisible by 8, add 100 to the element, otherwise keep it unchanged: [4, 108, 12] -``` - -Note the difference between the two `if`s on lines 3 and 4 in the above code: - -- The first `if` represents the filter condition of the variable `_listData` list comprehension itself, and cannot be followed by `else`. Elements that meet the conditions will be added to the list, and elements that do not meet the conditions will be removed. Besides, the process may change the length of the list. 
-- The second `if` represents the selection condition of the list iteration variable `l`, which means the `if-else` ternary expression, which must be followed by `else`, regardless of whether the condition is met, the resulting element is still in the list, the length of the list does not change. - -Dict comprehension example: - -```python -_dictData = {key1 = "value1", key2 = "value2"} -_dictData = {k = _dictData[k] for k in _dictData if k == "key1" and _dictData[k] == "value1"} # Filter out the elements whose key is "key1" and value is "value1" in _dictData, {"key1": "value1"} -``` - -Use comprehension to get all keys of dict: - -```python -dictData = {key1 = "value1", key2 = "value2"} -dictDataKeys = [k for k in _dictData] # ["key1", "key2"] -``` - -Use comprehension to sort a dict in ascending order by key: - -```python -dictData = {key3 = "value3", key2 = "value2", key1 = "value1"} # {'key3': 'value3', 'key2': 'value2', 'key1': 'value1'} -dictSortedData = {k = dictData[k] for k in sorted(dictData)} # {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} -``` - -Multi-level comprehension example: - -```python -array1 = [1, 2, 3] -array2 = [4, 5, 6] -data = [a1 + a2 for a1 in array1 for a2 in array2] # [5, 6, 7, 6, 7, 8, 7, 8, 9] len(data) == len(array1) * len(array2) -``` - -Double variable loop (list comprehension supports index iteration of list and value iteration of dict, which can simplify the code writing of list/dict iteration process): - -- list - -```python -data = [1000, 2000, 3000] -# Single variable loop -dataLoop1 = [i * 2 for i in data] # [2000, 4000, 6000] -dataLoop2 = [i for i in data if i == 2000] # [2000] -dataLoop3 = [i if i > 2 else i + 1 for i in data] # [1000, 2000, 3000] -# Double variable loop -dataLoop4 = [i + v for i, v in data] # [1000, 2001, 3002] -dataLoop5 = [v for i, v in data if v == 2000] # [2000] -# Use _ to ignore loop variables -dataLoop6 = [v if v > 2000 else v + i for i, v in data] # [1000, 2001, 3000] -dataLoop7 = [i for i, _ in data] # [0, 1, 2] -dataLoop8 = [v for _, v in data if v == 2000] # [2000] -``` - -- dict - -```python -data = {key1 = "value1", key2 = "value2"} -# Single variable loop -dataKeys1 = [k for k in data] # ["key1", "key2"] -dataValues1 = [data[k] for k in data] # ["value1", "value2"] -# Double variable loop -dataKeys2 = [k for k, v in data] # ["key1", "key2"] -dataValues2 = [v for k, v in data] # ["value1", "value2"] -dataFilter = {k = v for k, v in data if k == "key1" and v == "value1"} # {"key1": "value1"} -# Use _ to ignore loop variables -dataKeys3 = [k for k, _ in data] # ["key1", "key2"] -dataValues3 = [v for _, v in data] # ["value1", "value2"] -``` - -## 9. How to write an if conditional statement? - -KCL supports two ways to write if conditional statements: - -- if-elif-else block statement, where both elif and else blocks can be omitted, and the elif block can be used multiple times - -```python -success = True -_result = "failed" -if success: - _result = "success" -``` - -```python -success = True -if success: - _result = "success" -else: - _result = "failed" -``` - -```python -_result = 0 -if condition == "one": - _result = 1 -elif condition == "two": - _result = 2 -elif condition == "three": - _result = 3 -else: - _result = 4 -``` - -- Conditional expression ` if else `, similar to ` ? 
: ` ternary expression in C language - -```python -success = True -_result = "success" if success else "failed" -``` - -> Note: When writing an if-elif-else block statement, pay attention to the colon `:` after the if condition and keep the indentation consistent. - -In addition, conditional expressions can also be written directly in a list or dict (the difference is that the value to be written in the if expression written in the structure is not a statement): - -- list - -```python -env = "prod" -data = [ - "env_value" - ":" - if env == "prod": - "prod" # Write values that need to be added to data, not statements - else: - "other_prod" -] # ["env_value", ":", "prod"] -``` - -- dict - -```python -env = "prod" -config = { - if env == "prod": - MY_PROD_ENV = "prod_value" # Write key-value pairs that need to be added to config, not statements - else: - OTHER_ENV = "other_value" -} # {"MY_PROD_ENV": "prod_value"} -``` - -## 10. How to express logical operations such as "and" "or" "not"? - -In KCL, use `and` for "logical and", use `or` for "logical or", use `not` for "not", which is similar to `&&`, `||` and `~` semantic in C language. - -```python -done = True -col == 0 -if done and (col == 0 or col == 3): - ok = 1 -``` - -For "bitwise AND", "bitwise OR" and "bitwise XOR" of integers, we can use `&`, `|` and `^` operators in KCL, which is similar to `&`, `|` and `^` semantic in C language. - -```python -value = 0x22 -bitmask = 0x0f - -assert (value & bitmask) == 0x02 -assert (value & ~bitmask) == 0x20 -assert (value | bitmask) == 0x2f -assert (value ^ bitmask) == 0x2d -``` - -When we need to write a pattern such as `A if A else B`, we can use `A or B` to simplify, such as the following code: - -```python -value = [0] -default = [1] -x0 = value if value else default -x1 = value or default # Use `value or default` instead of `value if value else default` -``` - -## 11. How to judge whether a variable is None/Undefined, and whether a string/dict/list is empty? - -Please note that `False`, `None`, `Undefined`, number `0`, empty list `[]`, empty dictionary `{}` and empty string `""`, `''`, `""""""`, `''''''` in the conditional expression, are all treated as `false` expressions. - -For example, when judging a string variable `strData` is neither `None/Undefined` nor an empty string (string length is greater than 0), we can simply use the following expression: - -```python -strData = "value" -if strData: - isEmptyStr = False -``` - -Empty dictionary and empty list judgment examples: - -```python -_emptyList = [] -_emptyDict = {} -isEmptyList = False if _emptyList else True -isEmptyDict = False if _emptyDict else True -``` - -The output YAML is - -```yaml -isEmptyList: true -isEmptyDict: true -``` - -Or use the boolean function `bool` to judge - -```python -_emptyList = [] -_emptyDict = {} -isEmptyList = bool(_emptyList) -isEmptyDict = bool(_emptyDict) -``` - -## 12. How to concatenate strings, format strings, check string prefixes and suffixes and replace string content? 
- -- The `+` operator can be used to concatenate two strings in KCL - -```python -data1 = "string1" + "string2" # "string1string2" -data2 = "string1" + " " + "string2" # "string1 string2" -``` - -- There are currently two ways to format strings in KCL: - - `format` method for string variables `"{}".format()` - - Using string interpolation `${}` - -```python -hello = "hello" -a = "{} world".format(hello) -b = "${hello} world" -``` - -Note that if we want to use the `{` character or `}` alone in `"{}".format()`, we need to use `{{` and `}}` to convert `{` and `}` respectively, such as escaping a JSON string as follows: - -```python -data = "value" -jsonData = '{{"key": "{}"}}'.format(data) -``` - -The output YAML is - -```yaml -data: value -jsonData: '{"key": "value"}' -``` - -Note that if we want to use the `$` character alone in the `${}` interpolated string, we need to escape the `$` with `$$` - -```python -world = "world" -a = "hello {}".format(world) # "hello world" -b = "hello ${world}" # "hello world" -c = "$$hello ${world}$$" # "$hello world$" -c2 = "$" + "hello ${world}" + "$" # "$hello world$" -``` - -The output YAML is - -```yaml -world: world -a: hello world -b: hello world -c: $hello world$ -c2: $hello world$ -``` - -- Use the `startswith` and `endswith` methods of strings in KCL to check the prefix and suffix of strings - -```python -data = "length" -isEndsWith = data.endswith("th") # True -isStartsWith = "length".startswith('len') # True -``` - -- Use the replace method of the string or the `regex.replace` function to replace the content of the string in KCL - -```python -import regex -data1 = "length".replace("len", "xxx") # Replace "len", "xxxgth" with "xxx" -data2 = regex.replace("abc123", r"\D", "0") # Replace all non-digits in "abc123" with "0", "000123" -``` - -Among them, `r"\D"` means that we do not need to use `\\` to escape the backslash `\` in `\D`, which is mostly used in regular expression strings. - -Besides, we can use index placeholders or keyword placeholders in string formatting expressions to format multiple strings - -- Index placeholders - -```python -x = '{2} {1} {0}'.format('directions', 'the', 'Read') -y = '{0} {0} {0}'.format('string') -``` - -The output YAML is - -```yaml -x: Read the directions -y: string string string -``` - -- Keyword placeholders - -```python -x = 'a: {a}, b: {b}, c: {c}'.format(a = 1, b = 'Two', c = 12.3) -``` - -The output YAML is - -```yaml -x: 'a: 1, b: Two, c: 12.3' -``` - -## 13. What is the difference between using single and double quotes in a string? - -There is little difference between KCL single-quoted and double-quoted strings. The only difference is that we don't need to use `\"` to escape `"` in single-quoted strings, and we don't need to use `\'` to escape `'` in double-quoted strings. - -```python -singleQuotedString = 'This is my book named "foo"' # Don’t need to escape double quotes in single quoted strings. -doubleQuotedString = "This is my book named 'foo'" # Don’t need to escape single quotes in double quoted strings. 
-``` - -In addition, a long string consisting of three single quotes or three double quotes does not need to be escaped (except for the beginning and end of the string), such as the following example: - -```python -longStrWithQuote0 = """Double quotes in long strings "(not at the beginning and end)""" -longStrWithQuote1 = '''Double quotes in long strings "(not at the beginning and end)''' -longStrWithQuote2 = """Single quotes in long strings '(not at the beginning and end)""" -longStrWithQuote3 = '''Single quotes in long strings '(not at the beginning and end)''' -``` - -The output YAML is - -```yaml -longStrWithQuote0: Double quotes in long strings "(not at the beginning and end) -longStrWithQuote1: Double quotes in long strings "(not at the beginning and end) -longStrWithQuote2: Single quotes in long strings '(not at the beginning and end) -longStrWithQuote3: Single quotes in long strings '(not at the beginning and end) -``` - -## 14. How to write a long multiline string? - -In KCL, we can use a single-quoted string and newline characters `\n` or a triple-quoted string to write a multi-line string, and we can use the continuation character `\` to optimize the form of the KCL string. For example, for the three multi-line string variables in the following code, their values are the same: - -```python -string1 = "The first line\nThe second line\nThe third line\n" -string2 = """The first line -The second line -The third line -""" -string3 = """\ -The first line -The second line -The third line -""" # It is recommended to use the long string writing form of `string3`. -``` - -The output YAML is - -```yaml -string1: | - The first line - The second line - The third line -string2: | - The first line - The second line - The third line -string3: | - The first line - The second line - The third line -``` - -## 15. How to use regular expressions in KCL? - -Regular expressions can be used by importing the regular expression system module `import regex` in KCL, which includes the following functions: - -- **match**: Regular expression matching function, which matches the input string according to the regular expression, and returns a bool type to indicate whether the match is successful. -- **split**: Regular expression split function, which splits the string according to the regular expression, and returns a list of split strings. -- **replace**: Regular expression replacement function, which replaces all substrings in the string that satisfies the regular expression, and returns the replaced string. -- **compile**: Regular expression compilation function, which returns bool type to indicate whether it is a valid regular expression. -- **search**: Regular expression search function, which searches all substrings that satisfy the regular expression, and returns a list of substrings. 
- -Examples: - -```python -regex_source = "Apple,Google,Baidu,Xiaomi" -regex_split = regex.split(regex_source, ",") -regex_replace = regex.replace(regex_source, ",", "|") -regex_compile = regex.compile("$^") -regex_search = regex.search("aaaa", "a") -regex_find_all = regex.findall("aaaa", "a") -regex_result = regex.match("192.168.0.1", "^(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|[1-9])\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)$") # Determine if it is an IP string -regex_result_false = regex.match("192.168.0,1", "^(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|[1-9])\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)$") # Determine if it is an IP string -``` - -The output YAML is - -```yaml -regex_source: Apple,Google,Baidu,Xiaomi -regex_split: -- Apple -- Google -- Baidu -- Xiaomi -regex_replace: Apple|Google|Baidu|Xiaomi -regex_compile: true -regex_search: true -regex_find_all: -- a -- a -- a -- a -regex_result: true -regex_result_false: false -``` - -For longer regular expressions, we can also use **r-string** to ignore the escape of `\` symbols to simplify the writing of regular expression strings. - -Examples: - -```python -isIp = regex.match("192.168.0.1", r"^(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|[1-9])."+r"(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)."+r"(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)."+r"(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)$") # Determine if it is an IP string -``` - -```python -import regex - -schema Resource: - cpu: str = "1" - memory: str = "1024Mi" - disk: str = "10Gi" - check: - regex.match(cpu, r"^([+-]?[0-9.]+)([m]*[-+]?[0-9]*)$"), "cpu must match specific regular expression" - regex.match(memory, r"^([1-9][0-9]{0,63})(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$"), "memory must match specific regular expression" - regex.match(disk, r"^([1-9][0-9]{0,63})(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$"), "disk must match specific regular expression" -``` - -```python -import regex - -schema Env: - name: str - value?: str - check: - len(name) <= 63, "a valid env name must be no more than 63 characters" - regex.match(name, r"[A-Za-z_][A-Za-z0-9_]*"), "a valid env name must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" -``` - -## 16. What is the meaning of schema in KCL? - -Schema is a language element in KCL that defines the type of configuration data. Like struct in C language or class in Java, attributes can be defined in it, and each attribute has a corresponding type. - -## 17. How to use schema? - -In KCL, we can use the `schema` keyword to define a structure in which we can declare the various attributes of the schema. - -```python -# A Person structure with firstName of attribute string type, lastName of string type, age of integer type. -schema Person: - firstName: str - lastName: str - # The default value of the age attribute is 0. - age: int = 0 -``` - -A complex example: - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str - replica: int - command: [str] - labels: {str:str} -``` - -In the above code, `cpu` and `memory` are defined as integer types; `name`, `image` and `service` are string types; `command` is a list of string types; labels are dictionaries type whose key type and value type are both strings. - -## 18. How to add "optional" and "required" constraints to the schema attribute? 
- -The `?` operator is used in KCL to define an "optional" constraint for a schema, and the schema attribute is "required" by default. - -```python -# A Person structure with firstName of attribute string type, lastName of string type, age of integer type. -schema Person: - firstName?: str # firstName is an optional attribute that can be assigned to None/Undefined - lastName?: str # age is an optional attribute that can be assigned to None/Undefined - age: int = 18 # age is an optional attribute that can be assigned to None/Undefined. -``` - -## 19. How to write validation rules for attributes in schema? - -In the schema definition, we can use the `check` keyword to write the validation rules of the schema attribute. As shown below, each line in the check code block corresponds to a conditional expression. When the condition is satisfied, the validation is successful. The conditional expression can be followed by `, "check error message"` to indicate the information to be displayed when the validation fails. - -```python -import regex - -schema Sample: - foo: str # Required, cannot be None/Undefined, and the type must be str - bar: int # Required, cannot be None/Undefined, and the type must be int - fooList: [int] # Required, cannot be None/Undefined, and the type must be int list - color: "Red" | "Yellow" | "Blue" # Required, literal union type, and must be one of "Red", "Yellow", "Blue". - id?: int # Optional, can be None/Undefined, the type must be int - - check: - 0 <= bar < 100 # bar must be greater than or equal to 0 and less than 100 - 0 < len(fooList) < 100 # fooList cannot be None/Undefined, and the length must be greater than 0 and less than 100 - regex.match(foo, "^The.*Foo$") # regular expression matching - bar in range(100) # bar can only range from 1 to 99 - bar in [2, 4, 6, 8] # bar can only take 2, 4, 6, 8 - bar % 2 == 0 # bar must be a multiple of 2 - all foo in fooList { - foo > 1 - } # All elements in fooList must be greater than 1 - any foo in fooList { - foo > 10 - } # At least one element in fooList must be greater than 10 - abs(id) > 10 if id # check expression with if guard, when id is not empty, the absolute value of id must be greater than 10 -``` - -To sum up, the validation kinds supported in KCL schema are: - -| Kind | Method | -| ----------------- | ----------------------------------------------------------------------------------------- | -| Range | Using comparison operators such as `<`, `>` | -| Regex | Using methods such as `match` from the `regex` system module | -| Length | Using the `len` built-in function to get the length of a variable of type `list/dict/str` | -| Enum | Using literal union types | -| Optional/Required | Using optional/required attributes of schema | -| Condition | Using the check if conditional expression | - -## 20. How to add documentation to schema and its attributes? - -A complete schema document is represented as a triple-quoted string, with the following structure: - -```python -schema Person: - """The schema person definition - - Attributes - ---------- - name : str - The name of the person - age : int - The age of the person - - See Also - -------- - Son: - Sub-schema Son of the schema Person. - - Examples - -------- - person = Person { - name = "Alice" - age = 18 - } - """ - name: str - age: int - -person = Person { - name = "Alice" - age = 18 -} -``` - -## 21. How to write configuration based on schema? How to reuse the common configuration between multiple configurations? 
- -In the process of schema instantiation, we can use the unpacking operator `**` to expand the public configuration - -```python -schema Boy: - name: str - age: int - hc: int - -schema Girl: - name: str - age: int - hc: int - -config = { - age = 18 - hc = 10 -} - -boy = Boy { - **config - name = "Bob" -} -girl = Girl { - **config - name = "Alice" -} -``` - -The output YAML is - -```yaml -config: - age: 18 - hc: 10 -boy: - name: Bob - age: 18 - hc: 10 -girl: - name: Alice - age: 18 - hc: 10 -``` - -## 22. How to override the default value of schema attribute when writing configuration based on schema? - -After defining a schema, we can use the schema name to instantiate the corresponding configuration, use the `:` operator to union schema attribute default values, and use `=` to override schema attribute default values. - -```python -schema Meta: - labels: {str:str} = {"key1" = "value1"} - annotations: {str:str} = {"key1" = "value1"} - -meta = Meta { - labels: {"key2": "value2"} - annotations = {"key2" = "value2"} -} -``` - -The output YAML is - -```yaml -meta: - labels: - key1: value1 - key2: value2 - annotations: - key2: value2 -``` - -## 23. How to reuse schema definitions? - -We can declare the schema name that the schema needs to inherit at the definition: - -```python -# A person has a first name, a last name and an age. -schema Person: - firstName: str - lastName: str - # The default value of age is 0 - age: int = 0 - -# An employee **is** a person, and has some additional information. -schema Employee(Person): - bankCard: int - nationality: str - -employee = Employee { - firstName = "Bob" - lastName = "Green" - age = 18 - bankCard = 123456 - nationality = "China" -} -``` - -The output YAML is - -```yaml -employee: - firstName: Bob - lastName: Green - age: 18 - bankCard: 123456 - nationality: China -``` - -> Note: KCL only allows schema single inheritance. - -## 24. How to reuse schema logic through composition? - -We can use KCL schema mixin to reuse schema logic. Mixins are generally used for functions such as separation of data in schema internal attributes, and data mapping, which can make KCL code more modular and declarative. - -Note that it is not recommended to define dependencies for mixing attributes between different mixins, which will make the use of mixins complicated. - -Examples: - -```python -schema Person: - mixin [FullNameMixin, UpperMixin] - - firstName: str - lastName: str - fullName: str - upper: str - -schema FullNameMixin: - fullName = "{} {}".format(firstName, lastName) - -schema UpperMixin: - upper = fullName.upper() - -person = Person { - firstName = "John" - lastName = "Doe" -} -``` - -The output YAML is - -```yaml -person: - firstName: John - lastName: Doe - fullName: John Doe - upper: JOHN DOE -``` - -## 25. How to import other KCL files? - -Other KCL files can be imported via the `import` keyword, and KCL configuration files are organized into modules. A single KCL file is considered a module, and a directory is considered a package, as a special module. The `import` keyword supports both relative path import and absolute path import - -For example, for the following directory structure: - -``` -. 
-└── root - ├── kcl.mod - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ │── service1.k - │ └── service2.k - └── mixin - └── mixin1.k -``` - -For `main.k`, relative path import and absolute path import can be expressed as: - -```python -import service # Absolute path import, the root directory is the path where kcl.mod is located -import mixin # Absolute path import, the root directory is the path where kcl.mod is located - -import .model1 # Relative path import, current directory module -import ..service # Relative path import, parent directory -import ...root # Relative path import, parent directory of parent directory -``` - -> Note that for KCL's entry file `main.k`, it cannot import the folder where it is located, otherwise a circular import error will occur: - -```python -import model # Error: recursively loading -``` - -## 26. When can import be omitted? - -KCL files in the same folder the not in the main package can refer to each other without importing each other. For example, for the following directory structure: - -``` -. -└── root - ├── kcl.mod - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ │── service1.k - │ └── service2.k - └── mixin - └── mixin1.k -``` - -When main.k is used as the KCL command line entry file, the variables in main.k, model1.k and model2.k in the model folder cannot refer to each other and need to be imported through import, but service1.k in the service folder and Variables in service2.k can refer to each other, ignoring import - -service1.k - -```python -schema BaseService: - name: str - namespace: str -``` - -service2.k - -```python -schema Service(BaseService): - id: str -``` - -## 27. There is a line of code that is too long, how to wrap it gracefully with correct syntax? - -In KCL, we can use the continuation character `\` for newlines, and we can also use `\` in strings to indicate continuation. - -An example of a long string concatenation continuation line: - -```python -longString = "Too long expression " + \ - "Too long expression " + \ - "Too long expression " -``` - -An example of a continuation in the comprehension expression: - -```python -data = [1, 2, 3, 4] -dataNew = [ - d + 2 \ - for d in data \ - if d % 2 == 0 -] -``` - -An example of a continuation in the if expression: - -```python -condition = 1 -data1 = 1 \ - if condition \ - else 2 -data2 = 2 \ -if condition \ -else 1 -``` - -An example of a continuation in the long string: - -```python -longString = """\ -The first line\ -The continue second line\ -""" -``` - -Note: Use the line continuation character `\` while maintaining indentation, as follows: - -- Error use case: - -```python -data1 = [ - 1, 2, - 3, 4 \ -] # Error, need to keep the indentation of the closing bracket ] - -data2 = [ - 1, 2, - 3, 4 -] # Error, requires uniform indentation of numbers 1 and 3 -``` - -- Right use case: - -```python -data1 = [ - 1, 2, - 3, 4 -] # OK - -data2 = [ \ - 1, 2, \ - 3, 4 \ -] # OK - -data3 = [ \ - 1, 2, \ - 3, 4 \ -] # OK -``` - -## 28. What do these symbols `**` and `*` mean? - -- `**`, `*` appear outside dict/list to represent power operator and multiplication operator respectively. 
- -```python -data1 = 2 ** 4 # 16 -data2 = 2 * 3 # 6 -``` - -- `**`, `*` appear inside dict/list to indicate unpacking operator, often used for unpacking and merging of list/dict, similar to unpacking operator in Python - -Unpacking of dict: - -```python -data = {"key1" = "value1"} -dataUnpack = {**data, "key2" = "value2"} # {"key1": "value1", "key2": "value2"} -``` - -Unpacking of list: - -```python -data = [1, 2, 3] -dataUnpack = [*data, 4, 5, 6] # [1, 2, 3, 4, 5, 6] -``` - -## 29. How to get child elements of list/dict/schema - -- For list type, we can use `[]` to get an element in the list - -```python -data = [1, 2, 3] # Define an list of integer types -theFirstItem = data[0] # Get the element with index 0 in the list, that is, the first element 1 -theSecondItem = data[1] # Get the element with index 1 in the list, which is the first element 2 -``` - -> Note: The value of the index cannot exceed the length of the list, otherwise an error will occur, we can use the `len` function to get the length of the list. - -```python -data = [1, 2, 3] -dataLength = len(data) # List length is 3 -item = data[3] # Error: Index out of bounds -``` - -In addition, we can also use the negative index to get the elements in the list in reverse order. - -```python -data = [1, 2, 3] -item1 = data[-1] # Get the element with index -1 in the list, which is the last element 3 -item2 = data[-2] # Get the element with index -2 in the list, which is the second-to-last element 2 -``` - -In summary, the value range of the list index is `[-len, len - 1]` - -When we want to get a part of the sub-elements of the list, we can use the slice expression in `[]`, the specific syntax is `[::]`, Note that the value range of the start and end of the index is `left closed right open [, )`, note that the three parameters can be omitted or not written. - -```python -data = [1, 2, 3, 4, 5] -dataSlice0 = data[1:2] # Get the set of elements in the list whose index starts at 1 and ends at 2 [2] -dataSlice1 = data[1:3] # Get the set of elements in the list whose index starts at 1 and ends at 3 [2, 3] -dataSlice2 = data[1:] # Get the set of elements in the list whose index starts at 1 and ends at the last index [2, 3, 4, 5] -dataSlice3 = data[:3] # Get the set of elements in the list whose index starts at the first index and ends at 3 [1, 2, 3] -dataSlice4 = data[::2] # Get the set of elements in the list whose index starts at the first index and ends at the last index (step size is 2) [1, 3, 5] -dataSlice5 = data[::-1] # Reverse the list, [5, 4, 3, 2, 1] -dataSlice6 = data[2:1] # When the start, stop, step combination of three parameters does not meet the conditions, return an empty list []. -``` - -- For dict/schema types, we can use `[]` and `.` to get child elements in dict/schema. - -```python -data = {key1: "value1", key2: "value2"} -data1 = data["key1"] # "value1" -data2 = data.key1 # "value1" -data3 = data["key2"] # "value2" -data4 = data.key2 # "value2" -``` - -```python -schema Person: - name: str = "Alice" - age: int = 18 - -person = Person {} -name1 = person.name # "Alice" -name2 = person["name"] # "Alice" -age1 = person.age # 18 -age2 = person.age # 18 -``` - -When the key value does not exist in the dict, return the value `Undefined`. 
- -```python -data = {key1 = "value1", key2 = "value2"} -data1 = data["not_exist_key"] # Undefined -data2 = data.not_exist_key # Undefined -``` - -We can use the `in` keyword to determine whether a key value exists in dict/schema - -```python -data = {key1 = "value1", key2 = "value2"} -exist1 = "key1" in data # True -exist2 = "not_exist_key" in data # False -``` - -When there is `.` in the key value or when we need to get the value corresponding to a key value variable at runtime, we can only use the `[]` method. If there is no special case, use `.`: - -```python -name = "key1" -data = {key1 = "value1", key2 = "value2", "contains.dot" = "value3"} -data1 = data[name] # "value1" -data2 = data["contains.dot"] # "value3" -# Note that this is wrong: data3 = data.contains.dot -``` - -> Note: The above sub-element operators cannot operate on values of non-list/dict/schema collection types, such as integers, nulls, etc. - -```python -data = 1 -data1 = 1[0] # Error -``` - -```python -data = None -data1 = None[0] # Error -``` - -When getting the child elements of the collection type, it is often necessary to make a non-null or length judgment: - -```python -data = [] -item = data[0] if data else None -``` - -We can use the `?` operator to make an if non-null judgment, and return None when the condition is not satisfied. For example, the above code can be simplified to: - -```python -data = [] -item1 = data?[0] # When data is empty, return the empty value None -item2 = data?[0] or 1 # When data is empty, return the empty value None, if we don't want to return None, we can also use the or operator to return other default values e.g., "1" in `data?[0] or 1` -``` - -Use more `?` operators to avoid complicated and cumbersome non-null judgments - -```python -data = {key1.key2.key3 = []} -item = data?.key1?.key2?.key3?[0] -``` - -## 30. How to get the type of a variable in KCL code - -The KCL `typeof` built-in function can return the type (string representation) of a variable immediately for type assertion. - -Examples: - -```python -import sub as pkg - -_a = 1 - -t1 = typeof(_a) -t2 = typeof("abc") - -schema Person: - name?: any - -_x1 = Person{} -t3 = typeof(_x1) - -_x2 = pkg.Person{} -t4 = typeof(_x2) -t5 = typeof(_x2, full_name=True) - -t6 = typeof(_x1, full_name=True) - -# Output -# t1: int -# t2: str -# t3: Person -# t4: Person -# t5: sub.Person -# t6: __main__.Person -``` - -## 31. How to solve the conflict between keywords and KCL variable names? - -For identifier names that conflict with keywords, we can add a `$` prefix before the identifier to define a keyword identifier. For example, in the following code, keywords such as `if`, `else` can be used as identifiers with the `$` prefix and we can get the corresponding YAML output - -```python -$if = 1 -$else = "s" - -schema Data: - $filter: str = "filter" - -data = Data {} -``` - -The output YAML is - -```yaml -data: - filter: filter -if: 1 -else: s -``` - -> Note: Prefixing non-keyword identifiers with `$` has the same effect as not adding. - -```python -_a = 1 -$_a = 2 # Equivalent to `_a = 2` -``` - -## 32. Are built-in types of KCL a keyword of KCL? 
Whether they can be used for the definition of variables - -The built-in types of KCL include `int`, `float`, `bool` and `str`, which are not KCL keywords and can be used to define variables, such as the following code: - -```py -int = 1 -str = 2 -``` - -The output YAML is - -```yaml -int: 1 -str: 2 -``` - -> Note: If there are no special requirements, it is not recommended that the names of variables take these built-in types, because in some languages, they exist as keywords. - -## 33. How to implement enumeration in KCL? - -There are two ways to implement enumeration in KCL - -- Use **literal union types** (recommended) - -```python -schema Person: - name: str - gender: "Male" | "Female" - -person = Person { - name = "Alice" - gender = "Male" # gender can only be "Male" or "Female" -} -``` - -```python -schema Config: - colors: ["Red" | "Yellow" | "Blue"] # colors is an enumerated array - -config = Config { - colors = [ - "Red" - "Blue" - ] -} -``` - -- Use schema check expressions - -```python -schema Person: - name: str - gender: "Male" | "Female" - - check: - gender in ["Male", "Female"] - -person = Person { - name = "Alice" - gender = "Male" # gender can only be "Male" or "Female" -} -``` - -## 34. How to get the length of dict - -In KCL, we can use the `len` built-in function to directly find the length of a dict - -```python -len1 = len({k1: "v1"}) # 1 -len2 = len({k1: "v1", k2: "v2"}) # 2 -varDict = {k1 = 1, k2 = 2, k3 = 3} -len3 = len(varDict) # 3 -``` - -In addition, the `len` function can also be used to get the length of `str` and `list` types - -```python -len1 = len("hello") # 5 -len2 = len([1, 2, 3]) # 3 -``` - -## 35. How to write conditional configuration in KCL - -In KCL, in addition to writing `if-elif-else` conditional expressions in top-level statements, it also supports writing conditional expressions in KCL complex structures (list/dict/schema), and supports conditional configuration writing. - -```python -x = 1 -# Conditional configuration in list -dataList = [ - if x == 1: 1 -] -# Conditional configuration in dict -dataDict = { - if x == 1: key1 = "value1" # Inline form - elif x == 2: - key2 = "value2" # Multi-line form -} - -schema Config: - id?: int - -env = "prod" -# Conditional configuration in schema -dataSchema = Config { - if env == "prod": - id = 1 - elif env == "pre": - id = 2 - elif env == "test": - id = 3 -} -``` - -## 36. Does the == operator in KCL do deep comparisons? - -`==` operator in KCL - -- For primitive types `int`, `float`, `bool`, `str` variables are directly compared to see if their values are equal -- Variables of composite types `list`, `dict`, `schema` will deeply recursively compare their sub-elements for equality - - `list` type deep recursive recursive comparison of the value and length of each index - - `dict`/`schema` types deeply recursively compare the value of each attribute (regardless of the order in which the attributes appear) - -```python -print([1, 2] == [1, 2]) # True -print([[0, 1], 1] == [[0, 1], 1]) # True -print({k1 = 1, k2 = 2} == {k2 = 2, k1 = 1}) # True - -print([1, 2] == [1, 2, 3]) # False -print({k1 = 1, k2 = 2, k3 = 3} == {k2 = 2, k1 = 1}) # False -``` - -## 37. How to modify existing configuration blocks in KCL - -In KCL, there are three **attribute operators** `=`, `+=`, `:`, which can be used to modify existing configuration blocks, and can use **unpacking operator** ` **` etc. "inherit" all attribute fields and values ​​of a configuration block. 
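-
-As a quick preview, here is a compact sketch (the attribute names are illustrative, not taken from a real configuration) that uses all three operators inside a single configuration block; each operator is explained in detail below.
-
-```python
-config = {
-    replicas = 1 # `=` assigns and overrides the attribute value
-    command = ["kcl"]
-    command += ["-Y", "settings.yaml"] # `+=` appends elements to a list attribute
-    labels: {app: "nginx"}
-    labels: {env: "prod"} # `:` idempotently merges dict values
-} # The final config value is {"replicas": 1, "command": ["kcl", "-Y", "settings.yaml"], "labels": {"app": "nginx", "env": "prod"}}
-```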
-
-- The `=` attribute operator means overriding: it overrides or deletes the attribute value with priority (overriding with `Undefined` means deletion).
-- The `+=` attribute operator means adding: it is generally used to append sub-elements to a list-typed attribute. The operand following the `+=` attribute operator can only be of list type.
-- The `:` attribute operator means idempotent merging: when the values to be merged conflict, an error is reported; when there is no conflict, the values are merged.
-
-### Override attribute operator =
-
-The most commonly used attribute operator is `=`, which assigns a value to an attribute. When the same attribute is assigned multiple times, the later value overrides the earlier one. For global variables outside `{}` or attributes within `{}`, it means the new value overrides that global variable or attribute.
-
-```python
-data = { # define a dictionary type variable data
-    a = 1 # use = to declare an attribute a in data with a value of 1
-    b = 2 # use = to declare an attribute b in data with a value of 2
-} # The final data value is {"a": 1, "b": 2}
-```
-
-We can also use the override attribute operator at schema instantiation to override the default values of the schema. Generally, when creating a new schema instance, `=` is the operator to use unless there is a special requirement.
-
-```python
-schema Person:
-    name: str = "Alice" # schema Person's name attribute has the default value "Alice"
-    age: int = 18 # schema Person's age attribute has the default value 18
-
-bob = Person {
-    name = "Bob" # "Bob" -> "Alice", the value "Bob" overrides the default value "Alice" of the schema Person name attribute
-    age = 10 # 10 -> 18, the value 10 overrides the default value 18 of the schema Person age attribute
-} # The final value of bob is {"name": "Bob", "age": 10}
-```
-
-### Insert attribute operator +=
-
-The insert attribute operator adds to the value of an attribute in place, such as appending a new element to a list-typed attribute.
-
-```python
-data = {
-    args = ["kcl"] # use = to declare an attribute args in data with the value ["kcl"]
-    args += ["-Y", "settings.yaml"] # add two elements "-Y", "settings.yaml" to the attribute args using the += operator
-} # The final data value is {"args": ["kcl", "-Y", "settings.yaml"]}
-```
-
-### Merge attribute operator :
-
-The merge attribute operator idempotently merges the different configuration block values of an attribute. When the values to be merged conflict, an error is reported. It is mostly used in complex configuration merging scenarios.
-
-```python
-data = {
-    labels: {key1: "value1"} # define an attribute labels of dict type with the value {"key1": "value1"}
-    labels: {key2: "value2"} # use : to merge different configuration values of labels
-} # The final data value is {"labels": {"key1": "value1", "key2": "value2"}}
-```
-
-The merge attribute operator is idempotent, so the writing order of the configuration blocks to be merged does not affect the final result. For example, the two `labels` attributes in the above example can also be written in reverse order.
- -```python -data = { # The merged writing order of the same attribute labels does not affect the final result - labels: {key2: "value2"} # define a label whose type is dict and the value is {"key2": "value2"} - labels: {key1: "value1"} # Use : to combine different configuration values ​​of labels -} # The final data value is {"labels": {"key1": "value1", "key2": "value2"}} -``` - -Note: The merge attribute operator will check the merged values ​​for conflicts, and report an error when the configuration values ​​that need to be merged conflict. - -```python -data = { - a: 1 # the value of a is 1 - a: 2 # Error: The value 2 of a cannot be merged with the value 1 of a because the results conflict and the merge is not commutative -} -``` - -```python -data = { - labels: {key: "value"} - labels: {key: "override_value"} # Error: The values ​​"value" and "override_value" of the key attributes of two labels are conflicting and cannot be merged -} -``` - -The coalescing operator is used differently for different types - -- Attributes of different types cannot be merged -- When the attribute is a basic type such as int/float/str/bool, the operator will judge whether the values ​​to be merged are equal, and a merge conflict error will occur if they are not equal - -```python -data = { - a: 1 - a: 1 # Ok - a: 2 # Error -} -``` - -- when the attribute is of type list - - Merge conflict error occurs when two lists that need to be merged are not of equal length - - When the lengths of the two lists to be merged are equal, recursively merge each element in the list according to the index - -```python -data = { - args: ["kcl"] - args: ["-Y", "settings.yaml"] # Error: The lengths of the two args attributes are not the same and cannot be merged - env: [{key1: "value1"}] - env: [{key2: "value2"}] # Ok: The value of the final env attribute is [{"key1": "value1"}, {"key2": "value2"}] -} -``` - -- When the attribute is of type dict/schema, recursively merge each element in dict/schema according to key - -```python -data = { - labels: {key1: "value1"} - labels: {key2: "value2"} - labels: {key3: "value3"} -} # The final data value is {"labels": {"key1": "value1", "key2": "value2", "key3": "value3"}} -``` - -- the result of combining an attribute of any type with None/Undefined is itself - -```python -data = { - args: ["kcl"] - args: None # Ok - args: Undefined #Ok -} # The final data value is {"args": ["kcl"]} -``` - -Support declaration and merging of top-level variables using the `:` attribute (we can still declare a configuration block using `config = Config {}`) - -```python -schema Config: - id: int - value: str - -config: Config { - id: 1 -} -config: Config { - value: "1" -} -""" -Two Config configuration blocks are defined here, and the : operator can be used to merge the two configuration blocks together. The equivalent code for the merge is as follows: -config: Config { - id: 1 - value: "1" -} -""" -``` - -To sum up, the usage scenario of the merge attribute operator `:` is mainly the merge operation of the complex data structure list/dict/schema. In general, if there is no special requirement, the two attribute operators `=` and `+=` are used. Yes, so the best practice for attribute operators is as follows - -- For primitive types, use the `=` operator -- For the list type, the `=` and `+=` operators are generally used. 
Use `=` to completely override the list attribute, and use `+=` to add elements to the list -- For dict/schema types, the `:` operator is generally used - -In addition, when a configuration already exists, we can use the unpacking operator `**` to get all field values ​​of this configuration and modify the fields with different attribute operators, and get a new configuration - -```python -configBase = { - intKey = 1 # A attribute of type int - floatKey = 1.0 # A attribute of type float - listKey = [0] # A attribute of type list - dictKey = {key1: "value1"} # an attribute of type dict -} -configNew = { - **configBase # Unpack and inline configBase into configNew - intKey = 0 # Use override attribute operator = to override intKey attribute to 1 - floatKey = Undefined # Use override attribute operator = remove floatKey attribute - listKey += [1] # Add an attribute 1 to the end of the listKey attribute using the add attribute operator += - dictKey: {key2: "value2"} # Use the merge attribute operator: extend a key-value pair for the dictKey attribute -} -``` - -The output YAML result is: - -```yaml -configBase: - intKey: 1 - floatKey: 1.0 - listKey: - - 0 - dictKey: - key1: value1 -configNew: - intKey: 0 - listKey: - - 0 - - 1 - dictKey: - key1: value1 - key2: value2 -``` - -Alternatively two configuration blocks can be combined using the `|` operator: - -```python -configBase = { - intKey = 1 # A attribute of type int - floatKey = 1.0 # A attribute of type float - listKey = [0] # A attribute of type list - dictKey = {key1: "value1"} # an attribute of type dict -} -configNew = configBase | { # Use | to merge - intKey = 0 # Use override attribute operator = to override intKey attribute to 1 - floatKey = Undefined # Use override attribute operator = remove floatKey attribute - listKey += [1] # Add an attribute 1 to the end of the listKey attribute using the add attribute operator += - dictKey: {key2: "value2"} # Use the merge attribute operator: extend a key-value pair for the dictKey attribute -} -``` - -The output YAML is - -```yaml -configBase: - intKey: 1 - floatKey: 1.0 - listKey: - - 0 - dictKey: - key1: value1 -configNew: - intKey: 0 - listKey: - - 0 - - 1 - dictKey: - key1: value1 - key2: value2 -``` - -### The solution to the conflicting values on the attribute 'attr' between {value1} and {value2} error in KCL - -When an error like conflicting values on the attribute 'attr' between {value1} and {value2} occurs in KCL, it is usually a problem with the use of the merge attribute operator `:`, indicating that when the `value1` and `value2` configurations are merged, the attribute A conflict error occurred at `attr`. In general, modify the attr attribute of value2 to other attribute operators, use `=` to indicate overwrite, and use `+=` to indicate addition - -For example for the following code: - -```python -data = {k: 1} | {k: 2} # Error: conflicting values on the attribute 'k' between {'k': 1} and {'k': 2} -``` - -We can use the `=` attribute operator to modify it to the following form - -```python -data = {k: 1} | {k = 2} # Ok: the value 2 will override the value 1 through the `=` operator -``` - -## 38. How to traverse multiple elements at the same time in the for comprehension? 
- -In KCL, we can use for comprehension to traverse multiple elements - -- Example 1: two dimension element loop - -```python -dimension1 = [1, 2, 3] # The length of the dimension1 list is 3 -dimension2 = [1, 2, 3] # The length of the dimension2 list is 3 -matrix = [x + y for x in dimension1 for y in dimension2] # The length of the matrix list is 9 = 3 * 3 -``` - -The output YAML is: - -```yaml -dimension1: -- 1 -- 2 -- 3 -dimension2: -- 1 -- 2 -- 3 -matrix: -- 2 -- 3 -- 4 -- 3 -- 4 -- 5 -- 4 -- 5 -- 6 -``` - -- Example 2: Use for loop and `zip` built-in function to traverse multiple lists one by one by index - -```python -dimension1 = [1, 2, 3] # The length of the dimension1 list is 3 -dimension2 = [1, 2, 3] # The length of the dimension2 list is 3 -dimension3 = [d[0] + d[1] for d in zip(dimension1, dimension2)] # The length of the dimension1 list is 3 -``` - -The output YAML is: - -```yaml -dimension1: -- 1 -- 2 -- 3 -dimension2: -- 1 -- 2 -- 3 -dimension3: -- 2 -- 4 -- 6 -``` - -## 39. How to set default value for option function in KCL - -In KCL, when the value of the option attribute is None/Undefined or empty, we can use the logical `or` to directly specify a default value. - -```python -value = option("key") or "default_value" # When the value of key exists, take the value of option("key"), otherwise take "default_value" -``` - -Or use the default parameter of the option function. - -```python -value = option("key", default="default_value") # When the value of key exists, take the value of option("key"), otherwise take "default_value" -``` - -## 40. How to check that multiple attributes cannot be empty at the same time in schema in KCL? - -In KCL, a single attribute of schema cannot be empty by default, unless we use the attribute optional operator `?`. - -```python -schema Person: - name: str # Required. - age: int # Required. - id?: int # Optional. -``` - -When it is necessary to check that the schema attributes cannot be empty at the same time or only one of them is empty, it needs to be written with the help of schema check expressions. The following takes two attributes `a`, `b` of the schema `Config` as an example to illustrate. - -- `a` and `b` attributes cannot be empty at the same time. - -```python -schema Config: - a?: str - b?: str - - check: - a or b -``` - -- `a` and `b` attributes can only have one or both empty (cannot exist at the same time or not empty) - -```python -schema Config: - a?: str - b?: str - - check: - not a or not b -``` - -## 41. A file is imported in KCL, but the schema defined by other KCL files in the same directory cannot be found. What might be the reason? - -It may be caused to import only this file in this folder. In KCL, import statement supports importing the entire folder, and also supports importing a certain KCL file under a certain folder. For the following directory structure. - -``` -. -├── kcl.mod -├── main.k -└── pkg - ├── pkg1.k - ├── pkg2.k - └── pkg3.k -``` - -There is an entry file main.k in the root directory. You can write the following code in main.k to import the entire pkg folder. At this time, all schema definitions in the pkg folder are visible to each other. - -```python -import pkg -``` - -We can also write the following code to import a single file pkg/pkg1.k. At this time, pkg1.k cannot find other files, namely the schema definitions under pkg2.k/pkg3.k - -```python -import pkg.pkg1 -``` - -## 42. How is indentation handled in KCL? 
- -In KCL, when a colon `:`, square bracket pair `[]` and curly bracket pair `{}` appear, we generally need to use newline + indentation, and the number of indented spaces for the same indentation level needs to be consistent. The indentation level is generally represented by 4 spaces. - -- colon `:` followed by newline and indent - -```python -"""Indentation in if statements""" -_a = 1 -_b = 1 -if _a >= 1: # colon `:` followed by newline and indent - if _a > 8: - _b = 2 - elif a > 6: - _b = 3 - -"""Indentation in schema statements""" -schema Person: # colon `:` followed by newline and indent - name: str - age: int -``` - -- opening bracket `[` followed by newline and indent - -```python -data = [ # opening bracket `[` followed by newline and indent - 1 - 2 - 3 -] # unindent before closing bracket ] -``` - -```python -data = [ # opening bracket `[` followed by newline and indent - i * 2 for i in range(5) -] # unindent before closing bracket `]` -``` - -- opening bracket `{` followed by newline and indent - -```python -data = { # opening bracket `{` followed by newline and indent - k1 = "v1" - k2 = "v2" -} # unindent before closing brace `}` -``` - -```python -data = { # opening bracket `{` followed by newline and indent - str(i): i * 2 for i in range(5) -} # unindent before closing brace `}` -``` - -## 43. How to write simple tests for KCL code? - -The current version of KCL does not support internal program debugging, we can use the assert statement and the print function to achieve data assertion and viewing. - -```python -a = 1 -print("The value of a is", a) -assert a == 1 -``` - -In addition, we can also use the kcl-test test tool to write KCL internal test cases - -Assuming there is a hello.k file, the code is as follows: - -```python -schema Person: - name: str = "kcl" - age: int = 1 - -hello = Person { - name = "hello kcl" - age = 102 -} -``` - -Construct the hello_test.k test file with the following contents: - -```python -schema TestPerson: - a = Person{} - assert a.name == 'kcl' - -schema TestPerson_age: - a = Person{} - assert a.age == 1 - -schema TestPerson_ok: - a = Person{} - assert a.name == "kcl" - assert a.age == 1 -``` - -Then execute the kcl-test command in the directory: - -``` -$ kcl-test -ok /pkg/to/app [365.154142ms] -$ -``` - -## 44. How to define and use functions in KCL? - -The schema structure acts as a function to a certain extent, and this function has the ability to have multiple input parameters and multiple output parameters. 
For example, the following code can implement the function of a Fibonacci sequence: - -```python -schema Fib: - n: int - value: int = 1 if n <= 2 else (Fib {n: n - 1}).value + (Fib {n: n - 2}).value - -fib8 = (Fib {n: 8}).value -``` - -The output is - -```yaml -fib8: 21 -``` - -A schema function that merges lists into dictionaries - -```python -schema UnionAll[data, n]: - _?: [] = data - value?: {:} = ((UnionAll(data=data, n=n - 1) {}).value | data[n] if n > 0 else data[0]) if data else {} - -schema MergeList[data]: - """Union all elements in a list returns the merged dictionary - - [{"key1": "value1"}, {"key2": "value2"}, {"key3": "value3"}] -> {"key1": "value1", "key2": "value2", "key3": "value3"} - """ - _?: [] = data - value?: {:} = (UnionAll(data=data, n=len(data) - 1) {}).value if data else {} -``` - -In addition, KCL supports defining a function using the `lambda` keyword: - -```python -func = lambda x: int, y: int -> int { - x + y -} -a = func(1, 1) # 2 -``` - -A lambda function has the following properties: - -- A lambda function takes the value of the last expression as the return value of the function, and an empty function body returns None. -- The return value type annotation can be omitted, the return value type is the type of the last expression value. -- There are no order-independent features in the function body, all expressions are executed in order. - -```python -_func = lambda x: int, y: int -> int { - x + y -} # Define a function using the lambda expression -_func = lambda x: int, y: int -> int { - x - y -} # Ok -_func = lambda x: int, y: int -> str { - str(x + y) -} # Error (int, int) -> str can't be assigned to (int, int) -> int -``` - -A lambda function cannot participate in any computation and can only be used in assignment and call statements. - -```python -func = lambda x: int, y: int -> int { - x + y -} -x = func + 1 # Error: unsupported operand type(s) for +: 'function' and 'int(1)' -``` - -```python -a = 1 -func = lambda x: int { - x + a -} -funcOther = lambda f, para: int { - f(para) -} -r = funcOther(func, 1) # 2 -``` - -The output is - -```python -a: 1 -r: 2 -``` - -We can define an anonymous function and call it directly - -```python -result = (lambda x, y { - z = 2 * x - z + y -})(1, 1) # 3 -``` - -Anonymous functions can be also used in for loops - -```python -result = [(lambda x, y { - x + y -})(x, y) for x in [1, 2] for y in [1, 2]] # [2, 3, 3, 4] -``` - -Functions can be defined and used in the KCL schema - -```python -_funcOutOfSchema = lambda x: int, y: int { - x + y -} -schema Data: - _funcInSchema = lambda x: int, y: int { - x + y - } - id0: int = _funcOutOfSchema(1, 1) - id1: int = _funcInSchema(1, 1) - id2: int = (lambda x: int, y: int { - x + y - })(1, 1) -``` - -The output YAML is - -```yaml -data: - id0: 2 - id1: 2 - id2: 2 -``` - -## 45. Why do we get an error when a variable is assigned an enumeration type (a literal union type)? - -In KCL, a attribute defined as a literal union type is only allowed to receive a literal value or a variable of the same literal union type during assignment. 
For example, the following code is correct: - -```python -schema Data: - color: "Red" | "Yellow" | "Blue" - -data = Data { - color = "Red" # Ok, can be assigned as "Red", "Yellow" and "Blue" -} -``` - -However the following code is wrong: - -```python -schema Data: - color: "Red" | "Yellow" | "Blue" - -_color = "Red" - -data = Data { - color = _color # Error: expect str(Red)|str(Yellow)|str(Blue), got str -} -``` - -This is because there is no type declared for the variable `_color`, it will be deduced by the KCL compiler as a `str` string type, so when a "larger" type `str` is assigned to a "smaller" type `"Red" | "Yellow" | "Blue"` will report an error, one solution is to declare a type for the `_color` variable, the following code is correct: - -```python -schema Data: - color: "Red" | "Yellow" | "Blue" - -_color: "Red" | "Yellow" | "Blue" = "Red" - -data = Data { - color = _color # Ok -} -``` - -Further, we can use type aliases to simplify enumeration (writing of literal union types), such as the following code: - -```python -type Color = "Red" | "Yellow" | "Blue" # Define a type alias, which can be reused in different places, reducing the amount of code writing - -schema Data: - color: Color - -_color: Color = "Red" - -data = Data { - color = _color # Ok -} -``` - -## 46. Procedural for loop - -KCL provides comprehensions and all/any/map/filter expressions for processing a collection element, which meets most needs, and provides a procedural for loop body. Providing a procedural for loop body is not very demanding from the current scenario, so there is no procedural for loop support yet. - -In addition, although KCL does not support procedural for loops, it is possible to "construct" corresponding procedural for loops through for loops and lambda functions. - -```python -result = [(lambda x: int, y: int -> int { - # Write procedural for loop logic in the lambda function. - z = x + y - x * 2 -})(x, y) for x in [1, 2] for y in [1, 2]] # [2, 2, 4, 4] -``` - -## 47. Default variables are immutable - -The immutability of KCL variables means that the exported variables starting with non-underscore `_` in the KCL top-level structure cannot be changed after initialization. - -```python -schema Person: - name: str - age: int - -a = 1 # a will be output to YAML, once assigned it cannot be modified -_b = 1 # _b The variable is named with an underscore and will not be output to YAML. It can be modified by multiple assignments -_b = 2 -alice = Person { - name = "Alice" - age = 18 -} -``` - -There are two ways of specifying that variables are immutable: - -- non-underscore top-level variables outside the schema - -```python -a = 1 # immutable exported variable -_b = 2 # mutable non-export variable -``` - -## 48. How to develop a KCL plugin? - -KCL plugins are installed in the plugins subdirectory of KCLVM (usually installed in the `$HOME/.kusion/kclvm/plugins` directory), or set through the `$KCL_PLUGINS_ROOT` environment variable. For plugin developers, plugins are managed in the [Git repository](https://github.com/KusionStack/kcl-plugin), and the plugin repository can be cloned to this directory for development. - -KCL has built-in kcl-plugin scaffolding command to assist users to write KCL plug-ins in Python language, so that the corresponding plug-ins can be called in the KCL file to enhance the KCL language itself, such as accessing the network, reading and writing IO, CMDB query and encryption and decryption functions. . - -``` -usage: kcl-plugin [-h] {list,init,info,gendoc,test} ... 
- -positional arguments: - {list,init,info,gendoc,test} - kcl plugin sub commands - list list all plugins - init init a new plugin - info show plugin document - gendoc gen all plugins document - test test plugin - -optional arguments: - -h, --help show this help message and exit -``` - -For example, if you want to develop a plugin named io, you can use the following command to successfully create a new io plugin - -``` -kcl-plugin init io -``` - -Then you can use the following command to get the root path of the plugin and cd to the corresponding io plugin directory for development - -``` -kcl-plugin info -``` - -For example, if you want to develop a function read_file to read a file, you can write python code in `plugin.py` of `$plugin_root/io`: - -```python -# Copyright 2020 The KCL Authors. All rights reserved. - -import pathlib - -INFO = { - 'name': 'io', - 'describe': 'my io plugin description test', - 'long_describe': 'my io plugin long description test', - 'version': '0.0.1', -} - - -def read_file(file: str) -> str: - """Read string from file""" - return pathlib.Path(file).read_text() - -``` - -In addition, you can write the corresponding test function in `plugin_test.py`, or you can directly write the following KCL file for testing: - -```python -import kcl_plugin.io - -text = io.read_file('test.txt') -``` - -You can also use the info command to view information about the io plugin - -``` -kcl-plugin info io -``` - -``` -{ - "name": "io", - "describe": "my io plugin description test", - "long_describe": "my io plugin long description test", - "version": "0.0.1", - "method": { - "read_file": "Read string from file" - } -} -``` - -Finally, the plugin that has written the test can be merged with MR in the `kcl_plugins` repository. diff --git a/docs/user_docs/support/faq-yaml.md b/docs/user_docs/support/faq-yaml.md deleted file mode 100644 index 466efff9..00000000 --- a/docs/user_docs/support/faq-yaml.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -sidebar_position: 3 ---- - -# YAML - -## 1. What is the difference between single and double quote YAML strings? - -- YAML double-quoted strings are the only style that can express arbitrary strings, by using `\` escape characters, such as `\"` to escape double quotes `"`, `\\` to escape backslashes `\`, and a single backslash `\` can be used as a continuation character for double-quoted strings. -- YAML single-quoted strings differ from YAML double-quoted strings in that `\` and `"` can be used freely without escaping, but two single-quotes `''` are used to escape single-quote `'` characters. - -For the following example, the contents of the three string variables are the same. - -```yaml -string1: 'here '' s to "quotes"' -string2: "here's to \"quotes\"" -string3: here's to "quotes" -``` - -> Note: KCL's strategy for outputting YAML strings is to output unquoted strings or double-quoted strings preferentially when single quotes appear in the string content, and output single-quoted strings in other cases to avoid the burden of understanding. - -For more details, please refer to [YAML Spec v1.2](https://yaml.org/spec/1.2.1/) - -## 2. What is the meaning of symbols such as | - + > in YAML? - -When using KCL multi-line strings (triple quote strings), the output YAML often carries some special tokens, such as `|`, `-`, `+` and `>`, etc. 
These tokens usually are the representation method of YAML multi-line string, such as the following KCL code: - -```python -data = """This is a KCL multi line string (the first line) -This is a KCL multi line string (the second line) -This is a KCL multi line string (the third line) - - -""" -var = 1 -``` - -The output YAML is - -```yaml -data: |+ - This is a KCL multi line string (the first line) - This is a KCL multi line string (the second line) - This is a KCL multi line string (the third line) - - -var: 1 -``` - -- `|` represents **block style**, which is used to represent a multi-line string, where all newlines in the string represent the real newlines. -- `>` represents **folding style**, in which all newlines in the string will be replaced by spaces. -- `+` and `-` are used to control the use of newlines at the end of strings. The default is to keep a single newline at the end of the string. If we want to remove all newlines, we can put a `-` after the style indicator `|` or `>`. If we want to keep the newline at the end, we need to put a `+` after `|` or `>`. - -For more details, please refer to [YAML Multiline String](https://yaml-multiline.info/) and [YAML Spec v1.2](https://yaml.org/spec/1.2.1/) - -## 3. What is the meaning of numbers that appear after symbols | - + > such as |1 and |2 in YAML? - -Numbers represent **explicit indentation indicators** in YAML. For long strings in YAML, YAML usually the first non-blank line determines the indentation level of the string, and when the first non-blank line is preceded by a non-leading character, such as a newline, we must use **explicit indent indicators** to specify the indent level of the content, such as `|1` and `|2` etc. - -For example, for the following KCL code: - -```python -longStringStartWithEndline = """ -This is the second line -This is the third line -""" - -``` - -```yaml -longStringStartWithEndline: |2 - - This is the second line - This is the third line -``` - -- Writing long strings from the first line. - -```python -longString = """This is the second line -This is the third line -""" -``` - -- Writing long strings with line continuation characters. - -```python -longString = """\ -This is the second line -This is the third line -""" -``` - -The YAML output by the above two methods is: - -```yaml -longString: | - This is the second line - This is the third line -``` - -For more details, please refer to [YAML Spec v1.2](https://yaml.org/spec/1.2.1/) diff --git a/docs/user_docs/support/install-error.md b/docs/user_docs/support/install-error.md deleted file mode 100644 index 2ce84890..00000000 --- a/docs/user_docs/support/install-error.md +++ /dev/null @@ -1,35 +0,0 @@ -# Installation - -## 1. Could not find `libintl.dylib` - -This problem is that some tools depends on the `Gettext` library, but macOS does not have this library by default. You can try to solve it in the following ways: - -1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command - -``` -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -# add to path -export PATH=/opt/homebrew/bin:$PATH -``` - -2. `brew install gettext` -3. Make sure `libintl.8.dylib` exists in `/usr/local/opt/gettext/lib` directory -4. If brew is installed in another directory, the library can be created by copying it to the corresponding directory - -## 2. 
macOS system SSL related errors
-
-This covers errors such as "OpenSSL dylib library not found" or "SSL module is not available".
-
-1. (Skip this step for non-macOS M1) For the macOS M1 operating system, make sure you have an arm64e-version Homebrew installed in /opt/homebrew; otherwise, install the ARM version of brew with the following command:
-
-```
-/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
-# add to path
-export PATH=/opt/homebrew/bin:$PATH
-```
-
-2. Install openssl (version 1.1) via brew:
-
-```
-brew install openssl@1.1
-```
diff --git a/docs/user_docs/support/support.md b/docs/user_docs/support/support.md deleted file mode 100644 index 09ccb1e9..00000000 --- a/docs/user_docs/support/support.md +++ /dev/null @@ -1,4 +0,0 @@
-# FAQ
-
-KusionStack frequently asked questions.
-
diff --git a/docs_versioned_docs/version-v0.10/1-what-is-kusion/1-overview.md b/docs_versioned_docs/version-v0.10/1-what-is-kusion/1-overview.md new file mode 100644 index 00000000..ecd9efd3 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/1-what-is-kusion/1-overview.md @@ -0,0 +1,51 @@
+---
+id: overview
+title: Overview
+slug: /
+---
+
+# Overview
+
+Welcome to Kusion! This introduction section covers what Kusion is, the problem Kusion aims to solve, and how Kusion compares to other software. If you just want to dive into using Kusion, feel free to skip ahead to the [Getting Started](getting-started/install-kusion) section.
+
+## What is Kusion?
+
+Kusion is a modern application delivery and management toolchain that enables developers to specify desired intent in a declarative way and then use a consistent workflow to drive continuous deployment through the application lifecycle. Inspired by the phrase **Fusion on Kubernetes**, Kusion aims to help application and platform developers develop and deliver in a self-serviceable, fast, reliable, and collaborative way.
+
+
+![arch](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/workflow.png)
+
+
+## Why Kusion?
+
+Developers should be able to deploy and run their applications and services end to end. **"You build it, you run it", the original promise of DevOps.**
+
+But in most software organizations today, this promise quickly becomes unrealistic because of the increasingly complex cloud-native toolchain. While cloud-native technologies have made huge improvements in areas such as scalability, availability and operability, they also bring a downside - a growing burden on developers - which has led to the rise of [Platform Engineering](https://platformengineering.org/).
+
+Another challenge we have seen is that a series of [antipatterns](https://web.devopstopologies.com/#anti-types) emerge when regular software organizations try to implement true DevOps. Without a well-proven reference architecture and supporting tools, it's much more difficult to deliver on the original promise.
+
+On one hand, **Kusion was built to minimize developers' cognitive load**. With an application-centric configuration model, you don't need to deal with tedious infrastructure and configuration management tooling; all you need to be familiar with is [AppConfiguration](configuration-walkthrough/overview). This approach shields developers from the configurational complexity of Kubernetes while still enabling standardization by design.
+
+On the other hand, **Kusion defines a new way for different engineering teams to collaborate**. With the separation of concerns, different roles can focus on their own work based on their knowledge and responsibility.
Through such a division of labor, the platform team can better manage the differences and complexities of the platform, and app developers can participate in ops work with less cognitive load.
+
+## Kusion Highlights
+
+* **Platform as Code**
+
+  Specify desired application intent through declarative configuration code, and drive continuous deployment with any CI/CD system or GitOps to match that intent. No ad-hoc scripts, no hard-to-maintain custom workflows, just declarative configuration code.
+
+* **Dynamic Configuration Management**
+
+  Enable platform teams to set baseline templates and control how and where to deploy application workloads and provision accessory resources, while still giving application developers freedom via workload-centric specification and deployment.
+
+* **Security & Compliance Built In**
+
+  Enforce security and infrastructure best practices with out-of-the-box [base models](https://github.com/KusionStack/catalog), and create security and compliance guardrails for any Kusion deployment with third-party Policy as Code tools. All accessory resource secrets are automatically injected into Workloads.
+
+* **Lightweight and Open Model Ecosystem**
+
+  A pure client-side solution ensures good portability, and the rich APIs make it easy to integrate and automate. A large and growing model ecosystem covers all stages of the application lifecycle, with extensive connections to various infrastructure capabilities.
+
+:::tip
+
+**Kusion is an early project.** The end goal of Kusion is to boost the [Internal Developer Platform](https://internaldeveloperplatform.org/) revolution, and we are iterating on Kusion quickly to strive towards this goal. For any help or feedback, please contact us in [Slack](https://github.com/KusionStack/community/discussions/categories/meeting) or [issues](https://github.com/KusionStack/kusion/issues).
diff --git a/docs_versioned_docs/version-v0.10/1-what-is-kusion/2-kusion-vs-x.md b/docs_versioned_docs/version-v0.10/1-what-is-kusion/2-kusion-vs-x.md new file mode 100644 index 00000000..554b3c43 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/1-what-is-kusion/2-kusion-vs-x.md @@ -0,0 +1,39 @@
+---
+id: kusion-vs-x
+---
+
+# Kusion vs Other Software
+
+It can be difficult to understand how different software compares to each other. Is one a replacement for the other? Are they complementary? In this section, we compare Kusion to other software.
+
+**vs. GitOps (ArgoCD, FluxCD, etc.)**
+
+According to the [open GitOps principles](https://opengitops.dev/), GitOps systems typically have their desired state expressed declaratively, continuously observe the actual system state, and attempt to apply the desired state. In the design of the Kusion toolchain, we follow those principles but have no intention of reinventing any GitOps system's wheel.
+
+Kusion adopts your GitOps process and enriches it with additional features. The declarative [AppConfiguration](../concepts/app-configuration) model can be used to express desired intent; once the intent is declared, the [Kusion CLI](../reference/commands) takes on the role of making production match that intent as safely as possible.
+
+**vs. PaaS (Heroku, Vercel, etc.)**
+
+Kusion shares the goal of traditional PaaS platforms: to provide application delivery and management capabilities. The intuitive difference from full-functionality PaaS platforms is that Kusion is a client-side toolchain, not a complete PaaS platform.
+ +Also traditional PaaS platforms typically constrain the type of applications they can run but there is no such constrain for Kusion which means Kusion provides greater flexibility. + +Kusion allows you to have platform-like features without the constraints of a traditional PaaS. However, Kusion is not attempting to replace any PaaS platforms, instead Kusion can be used to deploy to a platform such as Heroku. + +**vs. KubeVela** + +KubeVela is a modern software delivery and management control plane. KubeVela makes it easier to deploy and operate applications on top of Kubernetes. + +Kusion is not a control plane. Kusion is a client-side tool for describing application intent in a declarative way and providing consistent workflow to apply that desired state. + +With proper Generator implementation, the target Spec of [AppConfiguration](../concepts/app-configuration) can be [KubeVela Application](https://kubevela.io/docs/getting-started/core-concept/) and Kusion can use KubeVela to satisfy the "apply" step. + +**vs. Helm** + +The concept of Helm originates from the [package management](https://en.wikipedia.org/wiki/Package_manager) mechanism of the operating system. It is a package management tool based on templated YAML files and supports the execution and management of resources in the package. + +Kusion is not a package manager. Kusion naturally provides a superset of Helm capabilities with the modeled key-value pairs, so that developers can use Kusion directly as a programable alternative to avoid the pain of writing text templates. For users who have adopted Helm, the stack compilation results in Kusion can be packaged and used in Helm format. + +**vs. Kubernetes** + +Kubernetes(K8s) is a container scheduling and management runtime widely used around the world, an "operating system" core for containers, and a platform for building platforms. Above the Kubernetes API layer, Kusion aims to provide app-centric **abstraction** and unified **workspace**, better **user experience** and automation **workflow**, and helps developers build the app delivery model easily and collaboratively. diff --git a/docs_versioned_docs/version-v0.10/1-what-is-kusion/_category_.json b/docs_versioned_docs/version-v0.10/1-what-is-kusion/_category_.json new file mode 100644 index 00000000..0817eb90 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/1-what-is-kusion/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "What is Kusion?" +} diff --git a/docs_versioned_docs/version-v0.10/2-getting-started/1-install-kusion.md b/docs_versioned_docs/version-v0.10/2-getting-started/1-install-kusion.md new file mode 100644 index 00000000..c7438054 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/2-getting-started/1-install-kusion.md @@ -0,0 +1,71 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Install Kusion + +You can install the latest Kusion CLI on MacOS and Linux. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on MacOS and Linux is to use the brew package manager. 
+ +**Install Kusion** + +```bash +# tap formula repository Kusionstack/tap +brew tap KusionStack/tap + +# install Kusion +brew install KusionStack/tap/kusion +``` + +**Update Kusion** + +```bash +# update formulae from remote +brew update + +# update Kusion +brew upgrade KusionStack/tap/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/tap/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +curl https://www.kusionstack.io/scripts/install.sh | sh +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.10.0, 0.9.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.10.0 +curl https://www.kusionstack.io/scripts/install.sh | sh -s 0.10.0 +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +curl https://www.kusionstack.io/scripts/uninstall.sh | sh +``` + +```mdx-code-block + + +``` diff --git a/docs_versioned_docs/version-v0.10/2-getting-started/2-deliver-wordpress.md b/docs_versioned_docs/version-v0.10/2-getting-started/2-deliver-wordpress.md new file mode 100644 index 00000000..1bbffa1a --- /dev/null +++ b/docs_versioned_docs/version-v0.10/2-getting-started/2-deliver-wordpress.md @@ -0,0 +1,208 @@ +--- +id: deliver-wordpress +--- + +# Deliver the WordPress Application on Kubernetes + +In this tutorial we will walk through how to deploy a WordPress application on Kubernetes with Kusion. The WordPress application will interact with a locally deployed MySQL, which is declared as a database accessory in the config codes and will be automatically created and managed by Kusion. + +## Prerequisites + +Before we start to play with this example, we need to have the Kusion CLI installed and run a Kubernetes cluster. Here are some helpful documentations: + +- Install [Kusion](./1-install-kusion.md) CLI +- Install [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) CLI and run a [Kubernetes](https://kubernetes.io/) cluster. Some light and convenient options for local deployment include [k3s](https://docs.k3s.io/quick-start), [k3d](https://k3d.io/v5.4.4/#installation), and [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/). + +## Init Workspace + +To deploy the WordPress application, we need to first initiate a `Workspace` for the targeted stack (we are using `dev` here). Please copy the following example YAML file to your local `workspace.yaml`. + +`workspace.yaml` +```yaml +# example workspace configs for local mysql database +runtimes: + kubernetes: + kubeConfig: /etc/kubeconfig.yaml # Please replace with your own kubeconfig file path + +modules: + mysql: + default: + suffix: "-mysql" # The suffix of the MySQL database name +``` + +You can replace the `runtimes.kubernetes.kubeConfig` field with your own kubeconfig file path in `workspace.yaml` and execute the following command line to initiate the workspace configuration for `dev` stack. + +```shell +kusion workspace create dev -f workspace.yaml +``` + +You can use the following command lines to list and show the workspace configurations for `dev` stack. 
+ +```shell +kusion workspace list + +kusion workspace show dev +``` + +The `workspace.yaml` is a sample configuration file for workspace management, including `Kubernetes` runtime config and `MySQL` module config. Workspace configurations are usually declared by **Platform Engineers** and will take effect through the corresponding stack. + +:::info +More details about the configuration of Workspace can be found in [Workspace Management](https://github.com/KusionStack/kusion/blob/main/docs/design/workspace_management/workspace_management.md). +::: + +## Init Project + +We can start by initializing this tutorial project with online templates: + +```shell +kusion init --online +``` + +All init templates are listed as follows: + +```shell +➜ kusion_playground kusion init --online +? Please choose a template: wordpress-local-db A sample wordpress project with local database +This command will walk you through creating a new kusion project. + +Enter a value or leave blank to accept the (default), and press . +Press ^C at any time to quit. + +Project Config: +? ProjectName: wordpress-local-db +? AppName: wordpress +Stack Config: dev +? Image: wordpress:6.3 +Created project 'wordpress-local-db' +``` + +Please select `wordpress-local-db` and press `Enter`, after which we will see the hints below and use the default values to configure this project and stack. + +![](/img/docs/user_docs/getting-started/init-wordpress-local-db.gif) + +The directory structure looks like the following: + +```shell +cd wordpress-local-db/dev && tree +``` + +```shell +➜ kusion_playground cd wordpress-local-db/dev && tree +. +├── kcl.mod +├── main.k +└── stack.yaml + +1 directory, 3 files +``` + +:::info +More details about the directory structure can be found in [Project](../3-concepts/1-project/1-overview.md) and [Stack](../3-concepts/2-stack/1-overview.md). +::: + +### Review Configuration Files + +Now let's have a glance at the configuration file of `dev/main.k`: + +```python +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.accessories.mysql + +# main.k declares customized configurations for dev stack. +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image: "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + "WORDPRESS_DB_NAME": "mysql" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 80 + } + ] + } + database: { + wordpress: mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +The configuration file `main.k`, usually written by the **App Developers**, declares customized configurations for `dev` stack, which includes an `AppConfiguration` with the name of `wordpress`. And the `wordpress` application includes a workload of type `workload.Service`, which runs on 1 replica and exposes `80` port to be accessed. Besides, it declares a local `mysql.MySQL` as the database accessory with the engine version of `8.0` for the application. 
The necessary Kubernetes resources for deploying and using the local database will be generated, and users can get the `host`, `username` and `paasword` of the database through the [mysql credentials and connectivity](../6-reference/2-modules/1-catalog-models/database/mysql.md#credentials-and-connectivity) of Kusion in application containers. + +This model hides the major complexity of Kubernetes resources such as `Namespace`, `Deployment` and `Service`, providing the concepts that are application-centric and infrastructure-agnostic. + +:::info +More details about the Models can be found in [Catalog](https://github.com/KusionStack/catalog) +::: + +:::info +The collaboration paradigm between App Developers and Platform Engineers with Kusion can be found in [Collaboration Paradigm](https://github.com/KusionStack/kusion/blob/main/docs/design/collaboration/collaboration_paradigm.md) +::: + +## Application Delivery + +```shell +kusion apply --watch +``` + +We will deliver the WordPress application in the `wordpress-local-db/dev` folder into the Kubernetes cluster with one command `kusion apply --watch`. + +![](/img/docs/user_docs/getting-started/apply-wordpress-local-db.gif) + +Check `Deployment` status. + +```shell +kubectl -n wordpress-local-db get deployment +``` + +The expected output is shown as follows: + +```shell +➜ dev kubectl -n wordpress-local-db get deployment +NAME READY UP-TO-DATE AVAILABLE AGE +wordpress-local-db-dev-wordpress 1/1 1 1 2m56s +wordpress-mysql 1/1 1 1 2m56s +``` + +In the above two resources, `wordpress-local-db-dev-wordpress` corresponds to the Kubernetes `Deployment` of the WordPress application, while `wordpress-mysql` corresponds to the `Deployment` of the local MySQL database. + +Port-forward our WordPress with the `Service`. + +```shell +kubectl port-forward -n wordpress-local-db service/wordpress-local-db-dev-wordpress-private 12345:80 +``` + +```shell +➜ dev kubectl port-forward -n wordpress-local-db service/wordpress-local-db-dev-wordpress-private 12345:80 +Forwarding from 127.0.0.1:12345 -> 80 +Forwarding from [::1]:12345 -> 80 + +``` + +Now we can visit [http://localhost:12345](http://localhost:12345) in our browser and enjoy! + +![](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +## Delete WordPress Application + +We can delete the WordPress application and related database resources using the following command line: + +```shell +kusion destroy --yes +``` + +![](/img/docs/user_docs/getting-started/destroy-wordpress-local-db.gif) diff --git a/docs_versioned_docs/version-v0.10/2-getting-started/_category_.json b/docs_versioned_docs/version-v0.10/2-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/docs_versioned_docs/version-v0.10/2-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/docs_versioned_docs/version-v0.10/3-concepts/1-project/1-overview.md b/docs_versioned_docs/version-v0.10/3-concepts/1-project/1-overview.md new file mode 100644 index 00000000..ab14991e --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/1-project/1-overview.md @@ -0,0 +1,12 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview + +A project in Kusion is defined as any folder that contains a project.yaml file and is linked to a Git repository. Typically, the mapping between a project and a repository is 1:1, however, it is possible to have multiple projects connected to a single repository—for example, in the case of a monorepo. 
A project consists of one or more applications. + +The purpose of the project is to bundle application configurations and refer to a Git repository. Specifically, it organizes logical configurations for internal components to orchestrate the application and assembles these configurations to suit different roles, such as developers and SREs, thereby covering the entire life cycle of application development. + +From the perspective of the application development life cycle, the configuration delineated by the project is decoupled from the application code. It takes an immutable image as input, allowing users to perform operations and maintain the application within an independent configuration codebase. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/3-concepts/1-project/2-configuration.md b/docs_versioned_docs/version-v0.10/3-concepts/1-project/2-configuration.md new file mode 100644 index 00000000..49821e37 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/1-project/2-configuration.md @@ -0,0 +1,29 @@ +--- +id: configuration +sidebar_label: Project Configuration +--- + +# Project Configuration + +Users can add config items of the project in `project.yaml`, such as the project name, generator type, Prometheus monitoring, etc. + +Here is an example of `project.yaml`. + +```yaml +# The project basic info +name: helloworld +generator: + type: AppConfiguration +prometheus: + operatorMode: True + monitorType: Service +``` + +The config items in `project.yaml` are explained below. + +- **name**: The name of the project +- **generator**: + - `type`: The type of the module generator, supports `AppConfiguration` and `KCL`, default is `AppConfiguration`. If using the schema AppConfiguration, set type as AppConfiguration +- **prometheus**: + - **operatorMode**: Decides whether Kusion runs Prometheus in `Operator` mode. Kusion will generate a `Custom Resource` if it is true, while generate some annotations if it is false + - **monitorType**: The type of the monitored resource, which can be one of `Service` or `Pod` diff --git a/docs_versioned_docs/version-v0.10/3-concepts/1-project/_category_.json b/docs_versioned_docs/version-v0.10/3-concepts/1-project/_category_.json new file mode 100644 index 00000000..3ca65e52 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/1-project/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Project" +} diff --git a/docs_versioned_docs/version-v0.10/3-concepts/2-stack/1-overview.md b/docs_versioned_docs/version-v0.10/3-concepts/2-stack/1-overview.md new file mode 100644 index 00000000..c2965292 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/2-stack/1-overview.md @@ -0,0 +1,11 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview + +A stack in Kusion is any folder that contains a stack.yaml file within the corresponding project directory. A stack provides a mechanism to isolate multiple deployments of the same application, serving as the target workspace to which an application will be deployed. It is also the smallest operational unit that can be configured and deployed independently. Stacks are commonly used to denote different phases of the software development lifecycle, such as development, staging, and production. 
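+
+For illustration only, a hypothetical project with `dev` and `prod` stacks (mirroring the multi-stack layout used elsewhere in these docs) might look like the following, where each stack directory carries its own `stack.yaml`:
+
+```
+multi-stack-project/
+├── project.yaml
+├── base
+│   └── base.k
+├── dev
+│   ├── kcl.mod
+│   ├── main.k
+│   └── stack.yaml
+└── prod
+    ├── kcl.mod
+    ├── main.k
+    └── stack.yaml
+```
+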
+## High Level Schema + +![High_Level_Schema](/img/docs/user_docs/concepts/high-level-schema.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/3-concepts/2-stack/2-configuration.md b/docs_versioned_docs/version-v0.10/3-concepts/2-stack/2-configuration.md new file mode 100644 index 00000000..40e06607 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/2-stack/2-configuration.md @@ -0,0 +1,19 @@ +--- +id: configuration +sidebar_label: Stack Configuration +--- + +# Stack Configuration + +Users can add config items of the stack in `stack.yaml`, such as the stack name, etc. + +Here is an example of `stack.yaml`. + +```yaml +# The stack basic info +name: dev +``` + +The config items in `stack.yaml` are explained below. + +- **name**: The name of the stack, should be same as the workspace name, such as `dev`, `pre` and `prod`. diff --git a/docs_versioned_docs/version-v0.10/3-concepts/2-stack/_category_.json b/docs_versioned_docs/version-v0.10/3-concepts/2-stack/_category_.json new file mode 100644 index 00000000..6425c52e --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/2-stack/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Stack" +} diff --git a/docs_versioned_docs/version-v0.10/3-concepts/3-kusion-module.md b/docs_versioned_docs/version-v0.10/3-concepts/3-kusion-module.md new file mode 100644 index 00000000..c1b4a7ee --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/3-kusion-module.md @@ -0,0 +1,8 @@ +# Kusion Module + +A Kusion module is a reusable building block designed by platform engineers to standardize application deployments and enable app developers to self-service. It consists of two parts: + +- App developer-oriented schema: It is a [KCL schema](https://kcl-lang.io/docs/user_docs/guides/schema-definition/). Fields in this schema are recommended to be understandable to application developers and workspace-agnostic. For example, a database Kusion module schema only contains fields like database engine type and database version. +- Kusion module generator: It is a piece of logic that generates the Intent with an instantiated schema mentioned above, along with platform configurations ([workspace](workspace)). As a building block, Kusion module hides the complexity of infrastructures. A database Kusion module not only represents a cloud RDS, but it also contains logic to configure other resources such as security groups and IAM policies. Additionally, it seamlessly injects the database host address, username, and password into the workload's environment variables. The generator logic can be very complex in some situations so we recommend implementing it in a GPL like [go](https://go.dev/). + +![kusion-module](/img/docs/concept/kusion-module.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/3-concepts/4-workspace.md b/docs_versioned_docs/version-v0.10/3-concepts/4-workspace.md new file mode 100644 index 00000000..40c5675f --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/4-workspace.md @@ -0,0 +1,98 @@ +# Workspace + +## Definition + +A workspace is a logical concept that represents a target environment for deploying a stack. It contains platform configurations, including a set of configurations, Kubeconfig, and provider authentication information, all of which can be reused by multiple stacks. We recommend organizing workspaces by SDLC (Software Development Life Cycle) phases or by cloud vendors. 
For example, workspaces could be named `dev`, `staging`, and `prod`, or according to cloud vendors such as `AWS`, `Azure`, and `Alibaba Cloud`. + +For clarity, workspace data is categorized into two types: configuration and secret. The configuration data is non-sensitive and is stored locally in YAML files, including module inputs, runtime configurations, and backend configurations. The secret data is sensitive and should be stored as workspace variables. For example, when using AWS, users must set the correct workspace variables for `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + +If a set of data items serves the same target and contains one or more sensitive data items, then the entire set should be managed using environment variables. This approach ensures a consistent and seamless user experience. + +Each stack must be associated with a single workspace, and **the stack's name must be the same as the workspace it will be deployed to**. + +![workspace-project-stack](/img/docs/concept/workspace-project-stack.png) + +:::info +In product design, Kusion does not support deploying to multiple clouds or multiple regions within a single workspace. While users can technically define a module that provisions resources across multiple clouds or regions, Kusion does not recommend this practice and will not provide technical support for such configurations. If a platform team needs to manage resources across multiple clouds or regions, they should create separate workspaces. +::: + +## Structure + +The configuration of a workspace is stored in a single YAML file, which consists of three components: `modules`, `runtimes`, and `backends`. + +A `module` configuration comprises default configs and several patchers, where the name of each patcher must not be **default**. Configurations in the `default` block will be applied to all applications in this workspace and configurations in the patcher block will only be applied to projects in the `projectSelector`. Values in patchers will override default configs with the same field name. +For the default configuration or a specific patcher, field keys must be the same as module input field names defined by the module. Module configurations can be found in the [Kusion Modules](../reference/modules) + +The `runtime` configuration currently supports Kubernetes and Terraform, where the former includes the field `kubeConfig` to specify the path of Kube Config, and the latter contains data for Terraform providers, which vary across different providers. For Terraform providers, sensitive data should be stored in environment variables. + +The `backend` configuration currently supports local, oss, s3, database, and http. This defines the backend for state, intent, and other Kusion data that may require storage in the future. This format requires that all Kusion data share the same backend. As with sensitive data in the runtime configuration, these details should also be stored in environment variables. Backend configurations can be found in the [Backend Configuration](backend-configuration) + +An example is shown as below: + +```yaml +# Module input, each with the format standard: +# # : +# # default: # default configurations, applied to all projects +# # : +# # : +# # ... +# # : #patcher configurations, applied to the projects assigned in projectSelector +# # : +# # ... +# # projectSelector: +# # - +# # ... 
+modules:
+  database:
+    default:
+      provider: aws
+      size: 20
+      instanceClass: db.t3.micro
+      securityIPs:
+      - 10.0.0.0/18
+    smallClass:
+      size: 50
+      instanceClass: db.t3.small
+      projectSelector:
+      - foo
+      - bar
+    largeClass:
+      instanceClass: db.t3.large
+      projectSelector:
+      - baz
+
+# A set of runtime configs, each with the format standard:
+# # :
+# #   :
+# #     :
+# #   ...
+runtimes:
+  kubernetes:
+    kubeConfig: /etc/kubeconfig.yaml
+  terraform:
+    aws:
+      version: 1.0.4
+      source: hashicorp/aws
+      region: us-east-1
+
+# A set of backend configs, each with the following format standard:
+# # :
+# #   :
+# #     :
+# #   ...
+backends:
+  s3:
+    bucket: kusion
+    region: us-east-1
+```
+
+## Workflow
+
+1. Write the `workspace.yaml` with the format shown above and fulfill all necessary fields.
+2. Create a workspace with `kusion workspace create -f `
+   A new workspace configuration file named `.yaml` will be created under the path `$KUSION_PATH/.workspace`, and validation will be done before the creation.
+3. Update a workspace with `kusion workspace update -f `
+   The workspace will be updated with the latest values.
+4. Delete a workspace with `kusion workspace delete ` if you don't need it anymore.
+
+More workspace commands can be found in the [reference](../reference/commands/kusion-workspace).
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.10/3-concepts/5-appconfiguration.md b/docs_versioned_docs/version-v0.10/3-concepts/5-appconfiguration.md
new file mode 100644
index 00000000..b3116097
--- /dev/null
+++ b/docs_versioned_docs/version-v0.10/3-concepts/5-appconfiguration.md
@@ -0,0 +1,46 @@
+---
+id: app-configuration
+sidebar_label: AppConfiguration
+---
+
+# AppConfiguration
+
+As a modern cloud-native application delivery toolchain, Kusion is centered on declarative, intent-based actuation, and the `AppConfiguration` model plays the role of describing that intent. It provides a simpler path for onboarding developers to the platform without leaking low-level details of the runtime infrastructure, allowing developers to fully focus on the application logic itself.
+
+The `AppConfiguration` model consolidates all the necessary components and their dependent accessories for the application deployment, along with any workflow, policy and operational requirements, into one standardized, infrastructure-independent declarative specification. This declarative specification represents the intuitive user intent for the application, which drives a standardized and efficient application delivery and operation process in a hybrid environment.
+
+![appconfig.png](/img/docs/concept/appconfig.png)
+
+AppConfiguration consists of five core concepts, namely `Components`, `Topologies`, `Pipeline`, `PolicySets`, and `Dependency`. We will walk through these concepts one by one.
+
+#### Component
+
+`Components` defines the foundation of any application configuration. Generally speaking, we believe that a comprehensive application description should at least consist of a core deployable workload that is frequently iterated and a collection of any other core services that the workload depends on, such as databases, caches or any other cloud services.
+
+Components are conceptually split into two categories, `Workload` and `Accessories`. The former revolves around the configuration for the computing resource. The latter represents any third-party runtime capabilities and operational requirements that the application needs.
Each AppConfiguration consists of exactly one workload and any number of accessories.
+
+Simply put, we can define `Components` with the following expression:
+
+`Components = Workload + Accessories`
+
+The concept of `Components` and `Accessories` itself is implicit when [authoring the configuration files](../configuration-walkthrough/overview). You can define the workload and any type of accessories (such as database or monitoring) directly under the AppConfiguration model.
+
+From a collaboration perspective, platform developers and SREs are responsible for continuously adding any new schemas (as abstractions for the underlying infrastructure) and implementations that can be used out-of-the-box. Application developers and SREs should be able to leverage the corresponding schemas to cover the evolving application needs. This helps software organizations achieve separation of concerns, so that different roles can focus on the subject matter they are experts in.
+
+#### Pipeline
+
+In most cases, the platform is capable of providing a consistent application delivery process that can meet most application needs. In the case that an application warrants any customization in the delivery workflow, the `Pipeline` section in AppConfiguration provides an approach to extend the workflow as needed.
+
+A typical delivery workflow is made of several stages, each corresponding to some logic that needs to be executed, such as manual approval, data transfer, coordinated multi-cluster release, notification, etc. Implementation-wise, the execution of each stage should be carried out with a plugin, developed and managed by the platform owners.
+
+#### Topologies
+
+In reality, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets including different clouds, regions, availability zones or runtimes for availability/cost/regulation/performance or disaster recovery related reasons. The `Topologies` section in AppConfiguration highlights the different deployment targets in the application delivery and provides a single pane of glass that overlooks the entire deployment topology.
+
+#### PolicySets
+
+The `PolicySets` section is responsible for defining the set of rules and procedures that should be followed in the application delivery process. They generally represent the guidelines with the purpose of minimizing any technical, security or compliance risks. Some examples include release strategies, risk management policies, and self-healing strategies. The collection of policies is expected to be managed as a joint effort from all the stakeholders, including platform owners, infrastructure owners, and security and compliance stakeholders. Some policy sets (usually security and compliance related) are expected to be mandatory. Others can be switched on and off by the application owner (the self-healing strategy, for instance) depending on their specific needs.
+
+#### Dependency
+
+In a production-scale environment, there are usually intricate dependencies between multiple applications. The `Dependency` section is responsible for describing the dependencies between multiple applications.
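+
+Putting the pieces above together, a minimal, hypothetical `AppConfiguration` sketch (using the schema names from the `catalog` package shown elsewhere in these docs; the image tag is a placeholder) illustrates how a workload and an accessory jointly form the `Components` of an application:
+
+```python
+import catalog.models.schema.v1 as ac
+import catalog.models.schema.v1.workload as wl
+import catalog.models.schema.v1.workload.container as c
+import catalog.models.schema.v1.accessories.mysql
+
+# Components = Workload + Accessories
+myapp: ac.AppConfiguration {
+    # the single workload of this application
+    workload: wl.Service {
+        containers: {
+            myapp: c.Container {
+                image: "myapp:v1"    # placeholder image for illustration
+            }
+        }
+        replicas: 2
+    }
+    # an accessory: a locally deployed MySQL database
+    database: {
+        myapp: mysql.MySQL {
+            type: "local"
+            version: "8.0"
+        }
+    }
+}
+```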
diff --git a/docs_versioned_docs/version-v0.10/3-concepts/6-intent.md b/docs_versioned_docs/version-v0.10/3-concepts/6-intent.md new file mode 100644 index 00000000..50f988df --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/6-intent.md @@ -0,0 +1,22 @@ +--- +id: intent +sidebar_label: Intent +--- + +# Intent + +The Intent represents the operational intentions that you aim to deliver using Kusion. These intentions are expected to contain all components throughout the software development lifecycle (SDLC), including resources (workload, database, load balancer, etc.), dependencies, and policies. The Kusion module generators are responsible for converting all AppConfigurations and environment configurations into the Intent. Once the Intent is generated, the Kusion Engine takes charge of updating the actual infrastructures to match the Intent. + +## Purpose + +### Single Source of Truth + +In Kusion's workflow, the platform engineer builds Kusion modules and provides environment configurations, application developers choose Kusion modules they need and deploy operational intentions to an environment with related environment configurations. They can also input dynamic parameters like the container image when executing the `kusion build` command. So the final operational intentions include configurations written by application developers, environment configurations and dynamic inputs. Due to this reason, we introduce **Intent** to represent the SSoT(Single Source of Truth) of Kusion. It is the result of `kusion build` which contains all operational intentions from different sources. + +### Consistency + +Delivering an application to different environments with identical configurations is a common practice, especially for applications that require scalable distribution. In such cases, an immutable configuration package is helpful. By utilizing the Intent, all configurations and changes are stored in a single file. As the Intent is the input of Kusion, it ensures consistency across different environments whenever you execute Kusion with the same Intent file. + +### Rollback and Disaster Recovery + +The ability to roll back is crucial in reducing incident duration. Rolling back the system to a previously validated version is much faster compared to attempting to fix it during an outage. We regard a validated Intent as a snapshot of the system and recommend storing the Intent in a version control system like Git. This enables better change management practices and makes it simpler to roll back to previous versions if needed. In case of a failure or outage, having a validated Intent simplifies the rollback process, ensuring that the system can be quickly recovered. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/3-concepts/7-backend-configuration.md b/docs_versioned_docs/version-v0.10/3-concepts/7-backend-configuration.md new file mode 100644 index 00000000..7c3aa903 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/7-backend-configuration.md @@ -0,0 +1,185 @@ +--- +id: backend-configuration +sidebar_label: Backend Configuration +--- + +# Backend Configuration + +The backend configuration defines the place where Kusion stores its `state` data file. By default, Kusion uses the `local` type of backend to store the state on the local disk. While for team collaboration projects, the state can be stored on a remote backend, such as `mysql`, `oss` and `s3` to allow multiple users access it. 
+ +## Configuring State Backend + +There are three ways to configure the backend: + +- workspace configuration file +- environment variables +- command line parameters + +### Workspace Configuration File + +Users can configure the storage of the state with the `backends` block in the workspace file, where a map with the backend type as the key and the corresponding config items as the value to declare the backend configuration. Be attention, only one kind of backend type is allowed, more than one backend types are illegal. + +The following gives an example of the backend configuration of `mysql`. + +```yaml +backends: + mysql: + dbName: + user: + password: + host: + port: +``` + +### Environment Variables + +For the sensitive information, Kusion supports configuring them by environment variables. Not all the configuration items are enabled, and the items differ from backend type. For example, users can configure mysql password by environment variable `KUSION_BACKEND_MYSQL_PASSWORD`. + + +### Command Line Parameters + +Users can specify the type of backend with the option `--backend-type`, and configure the detailed information with `--backend-config` or `-C`, for instance: + +```shell +kusion apply --backend-type mysql -C dbName= -C user= -C password= -C host= -C port= +``` + +### Configuration Combination + +When more than one configuration methods are in use, Kusion will merge them to generate the whole backend configuration. Workspace configuration file, environment variables, command line parameter: the priority of these three configuration methods increases gradually. If there is no conflict of backend type, the latter will overlay the former by configuration items. If there is conflict of backend type, which only occurs between workspace configuration file and command line parameters, use the backend type specified by command line, and the configuration items from workspace are deprecated. + +## Available Backend + +- local +- mysql +- oss +- s3 + +### local + +The `local` storage type stores the `state` on the local file system, which is suitable for local operations while not ideal for multi-user collaboration. + +There is no configuration items for `local` backend. When neither the workspace configuration file nor the command line parameters declare the backend configuration, Kusion by default uses the `local`. + +### mysql + +The `mysql` storage type stores the `state` into a **mysql database**. + +```yaml +# workspace configuration file +backends: + mysql: + dbName: + user: + password: + host: + port: +``` + +```bash +# environment variables +export KUSION_BACKEND_MYSQL_PASSWORD= +``` + +```shell +# command line parameters +kusion apply --backend-type mysql -C dbName= -C user= -C password= -C host= -C port= +``` + +* dbName - `required` the name of the database +* user - `required` the username of the database +* password - `required` the password of the database, support declaring by environment variable `KUSION_BACKEND_MYSQL_PASSWORD` +* host - `required` the access address for the database +* port - `required` the port of the database + +Note that the table name in the database used by Kusion is **state**. 
Below is an example SQL statement for creating this table: + +```sql +CREATE TABLE `state` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key', + `tenant` varchar(100) DEFAULT NULL COMMENT 'tenant', + `project` varchar(100) NOT NULL COMMENT 'project', + `kusion_version` varchar(50) DEFAULT NULL COMMENT 'kusion version', + `version` int(10) unsigned NOT NULL COMMENT 'current state format version,may upgrade in the future', + `serial` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT 'modification times for state,can be used in concurrent control', + `operator` varchar(100) DEFAULT NULL COMMENT 'last modifier', + `resources` longtext DEFAULT NULL COMMENT 'state of the resources,json array', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time', + `modified_time` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + `stack` varchar(100) DEFAULT NULL COMMENT 'stack', + `cluster` varchar(100) DEFAULT NULL COMMENT 'logical isolation in a stack,usually clustername__cellname', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_state_latest` (`tenant`, `project`, `stack`, `serial`, `cluster`), + KEY `idx_tenant` (`tenant`), + KEY `idx_project` (`project`), + KEY `idx_kusion_version` (`kusion_version`), + KEY `idx_version` (`version`), + KEY `idx_create_time` (`create_time`), + KEY `idx_modified_time` (`modified_time`), + KEY `idx_stack` (`stack`), + KEY `idx_cluster` (`cluster`) +); +``` + +### oss + +The `oss` storage type stores the `state` on the **Alicloud Object Storage Service (OSS)**. + +```yaml +# workspace configuration file +backends: + oss: + endpoint: + bucket: + accessKeyID: + access-key-secret: +``` + +```bash +# environment variables +export OSS_ACCESS_KEY_ID= +export OSS_ACCESS_KEY_SECRET= +``` + +```shell +# command line parameters +kusion apply --backend-type oss -C endpoint= -C bucket= -C accessKeyID= -C accessKeySecret= +``` + +* endpoint - `required` specify the access endpoint for alicloud oss bucket +* bucket - `required` specify the name of the alicloud oss bucket +* accessKeyID - `required` specify the alicloud account accessKeyID, support declaring by environment variable `OSS_ACCESS_KEY_ID` +* accessKeySecret - `required` specify the alicloud account accessKeySecret, support declaring by environment variable `OSS_ACCESS_KEY_SECRET` + +### s3 + +The `s3` storage type stores the `state` on the **AWS Simple Storage Service (S3)**. 
+ +```yaml +# workspace configuration file +backend: + s3: + endpoint: + bucket: + accessKeyID: + access-key-secret: + region: +``` + +```bash +# environment variables +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export AWS_REGION= +``` + +```shell +# command line parameters +kusion apply --backend-type s3 -C endpoint= -C bucket= -C accessKeyID= -C accessKeySecret= -C region= +``` + +* endpoint - `optional` specify the access endpoint for aws s3 bucket +* bucket - `required` specify the name of the aws s3 bucket +* accessKeyID - `required` specify the aws account accessKeyID, support declaring by environment variable `AWS_ACCESS_KEY_ID` +* accessKeySecret - `required` specify the aws account accessKeySecret, support declaring by environment variable `AWS_SECRET_ACCESS_KEY` +* region - `required` specify the region of aws s3 bucket, support declaring by environment variable `AWS_DEFAULT_REGION` or `AWS_REGION` diff --git a/docs_versioned_docs/version-v0.10/3-concepts/8-how-kusion-works.md b/docs_versioned_docs/version-v0.10/3-concepts/8-how-kusion-works.md new file mode 100644 index 00000000..ebd9005d --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/8-how-kusion-works.md @@ -0,0 +1,138 @@ +--- +id: how-kusion-works +sidebar_label: How Kusion Works? +--- + +# How Kusion Works? + +Kusion is the platform engineering engine of [KusionStack](https://github.com/KusionStack). It delivers intentions described with Kusion Modules defined in [Catalog](https://github.com/KusionStack/catalog) to Kubernetes, Clouds and On-Prem infrastructures. + +![arch](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/workflow.png) + +## Overview + +The workflow of KusionStack is illustrated in the diagram above, and it consists of three steps. The first step is `Write`, where platform engineers provide Kusion Modules and application developers write AppConfigurations based on the Kusion Modules to describe their operational intent. + +The second step is the `Build` process, which results in the creation of the SSoT (Single Source of Truth), also known as the [Intent](intent) of the current operational task. If you need version management of the SSoT, we recommend you manage the Intent with a VCS (Version Control System) tool like git. + +The third step is `Apply` which makes the Intent effective. Kusion parses the operational intent based on the Intent produced in the previous step. Before applying the intent, Kusion will execute the Preview command (you can also execute this command manually) which will use a three-way diff algorithm to preview changes and prompt users to make sure all changes meet expectations; the Apply command will then actualize the operational intent onto various infrastructure platforms. Currently, it supports three runtimes: Terraform, Kubernetes, and on-prem infrastructures. + +As a user of Kusion, if you prefer not to be conscious of so many steps, you can simply use `kusion apply`, and Kusion will automatically execute all the aforementioned steps for you. + +## Platform Developer’s Workflow + +### Design Kusion Modules + +[Kusion Module](kusion-module) is a reusable building block designed by platform engineers and contains two components: an application developer-oriented schema and a Kusion module generator. When platform engineers have developed a Kusion module, they can push it to a [catalog](https://github.com/KusionStack/catalog) repository to make it into a KCL package. 
+
+Given a database Kusion module as an example, the schema definition is shown below and the generator logic can be found [here](https://github.com/KusionStack/kusion/blob/main/pkg/modules/generators/accessories/database_generator.go).
+
+```python
+schema MySQL:
+    """ MySQL describes the attributes to locally deploy or create a cloud provider
+    managed mysql database instance for the workload.
+
+    Attributes
+    ----------
+    type: "local" | "cloud", defaults to Undefined, required.
+        Type defines whether the mysql database is deployed locally or provided by
+        cloud vendor.
+    version: str, defaults to Undefined, required.
+        Version defines the mysql version to use.
+
+    Examples
+    --------
+    Instantiate a local mysql database with version of 5.7.
+
+    import models.schema.v1.accessories.mysql
+
+    mysql: mysql.MySQL {
+        type: "local"
+        version: "5.7"
+    }
+    """
+
+    # The deployment mode of the mysql database.
+    type: "local" | "cloud"
+
+    # The mysql database version to use.
+    version: str
+```
+
+### Instantiate and Set Up Workspaces
+
+Each [workspace](workspace) includes a corresponding platform config file maintained by platform engineers.
+Platform engineers should instantiate all workspaces and fulfill all fields with platform default values. Kusion will merge the workspace configuration with the AppConfiguration in the Stack of the same name. An example is as follows.
+
+```yaml
+runtimes:
+  # your kubeconfig file path
+  kubernetes:
+    kubeConfig: /etc/kubeconfig.yaml
+  # metadata of the Terraform providers in use
+  terraform:
+    random:
+      version: 3.5.1
+      source: hashicorp/random
+    aws:
+      version: 5.0.1
+      source: hashicorp/aws
+      region: us-east-1
+
+modules:
+  # platform configuration of AWS RDS MySQL
+  mysql:
+    default:
+      cloud: aws
+      size: 20
+      instanceType: db.t3.micro
+      privateRouting: false
+      suffix: "-mysql"
+```
+
+The `mysql` block represents a Kusion module. The fields inside are part of the inputs for the Kusion module generator. For more details about the workspace, please refer to the [workspace](workspace) section.
+
+## Application Developer’s Workflow
+
+### Instantiate AppConfiguration and Apply
+
+Application developers choose the Kusion modules they need and instantiate them in the AppConfiguration to describe their operational intentions. We have built some built-in Kusion modules in the repository [Catalog](https://github.com/KusionStack/catalog) and we warmly welcome you to join us in building this ecosystem together.
+
+`main.k` is the **only** configuration maintained by application developers, and the schemas in this file are defined from the application developer's perspective to reduce their cognitive load. An example is as follows.
+
+```python
+import catalog.models.schema.v1 as ac
+import catalog.models.schema.v1.workload as wl
+import catalog.models.schema.v1.workload.container as c
+import catalog.models.schema.v1.workload.network as n
+import catalog.models.schema.v1.accessories.mysql
+
+# main.k declares customized configurations for dev stacks.
+wordpress: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            wordpress: c.Container {
+                image: "wordpress:6.3"
+                env: {
+                    "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)"
+                    "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)"
+                    "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)"
+                    "WORDPRESS_DB_NAME": "mysql"
+                }
+                ......
+            }
+        }
+        ......
+ } + database: { + wordpress: mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +`workload` and `database` are both Kusion modules provided by platform engineers and Kusion will convert them into actual infrastructure API calls eventually. + +Finally, application developers can deliver their operational intent to infrastructures with one command `kusion apply`. diff --git a/docs_versioned_docs/version-v0.10/3-concepts/_category_.json b/docs_versioned_docs/version-v0.10/3-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/3-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/1-overview.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/1-overview.md new file mode 100644 index 00000000..ef87e995 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/1-overview.md @@ -0,0 +1,210 @@ +--- +id: overview +--- + +# Configuration File Overview + +Kusion consumes one or more declarative configuration files (written in KCL) that describe the application, and delivers intent to the target runtime including Kubernetes, clouds, or on-prem infrastructure. + +This documentation series walks you through the odds and ends of managing such configuration files. + +## Table of Content + +- [Configuration File Overview](#configuration-file-overview) + - [Table of Content](#table-of-content) + - [Directory Structure](#directory-structure) + - [AppConfiguration Model](#appconfiguration-model) + - [Authoring Configuration Files](#authoring-configuration-files) + - [Identifying KCL file](#identifying-kcl-file) + - [KCL Packages and Import](#kcl-packages-and-import) + - [Understanding kcl.mod](#understanding-kclmod) + - [Building Blocks](#building-blocks) + - [Instantiating an application](#instantiating-an-application) + - [Using `kusion init`](#using-kusion-init) + - [Using references](#using-references) + +## Directory Structure + +Kusion expects the configuration file to be placed in a certain directory structure because it might need some metadata (that is not stored in the application configuration itself) in order to proceed. + +:::info + +See [Project](../concepts/project/overview) and [Stack](../concepts/stack/overview) for more details about Project and Stack. +::: + +A sample multi-stack directory structure looks like the following: +``` +~/playground$ tree multi-stack-project/ +multi-stack-project/ +├── README.md +├── base +│   └── base.k +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +├── prod +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +In general, the directory structure follows a hierarchy where the top-level is the project configurations, and the sub-directories represent stack-level configurations. + +You may notice there is a `base` directory besides all the stacks. The `base` directory is not mandatory, but rather a place to store common configurations between different stacks. A common pattern we observed is to use stacks to represent different stages (dev, stage, prod, etc.) in the software development lifecycle, and/or different deployment targets (azure-eastus, aws-us-east-1, etc). A project can have as many stacks as needed. 
+ +In practice, the applications deployed into dev and prod might very likely end up with a similar set of configurations except a few fields such as the application image (dev might be on newer versions), resource requirements (prod might require more resources), etc. + +As a general best practice, we recommend managing the common configurations in `base.k` as much as possible to minimize duplicate code. We will cover how override works in [Base and Override](base-override). + +## AppConfiguration Model + +`AppConfiguration` is the out-of-the-box model we build that describes an application. It serves as the declarative intent for a given application. + +The schema for `AppConfiguration` is defined in the [KusionStack/catalog](https://github.com/KusionStack/catalog) repository. It is designed as a unified, application-centric model that encapsulates the comprehensive configuration details and in the meantime, hides the complexity of the infrastructure as much as possible. + +`AppConfiguration` consists of multiple sub-components that each represent either the application workload itself, its dependencies, relevant workflows or operational expectations. We will deep dive into the details on how to author each of these elements in this upcoming documentation series. + +For more details on the `AppConfiguration`, please refer to the [design documentation](../concepts/app-configuration). + +## Authoring Configuration Files + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open-source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +### Identifying KCL file + +KCL files are identified with `.k` suffix in the filename. + +### KCL Packages and Import + +Similar to most modern General Programming Languages (GPLs), KCL packages are used to organize collections of related KCL source files into modular and re-usable units. + +In the context of Kusion, we use KCL packages to define models that could best abstract the behavior of an application. Specifically, we provide an official out-of-the-box KCL package(will keep iterating) with the name [catalog](https://github.com/KusionStack/catalog). When authoring an application configuration file, you can simply import the [catalog](https://github.com/KusionStack/catalog) package in the source code and use all the schemas (including AppConfiguration) defined in the `catalog` package. + +Similarly, if the schemas in the [catalog](https://github.com/KusionStack/catalog) package does not meet your needs, you can always fork it and make modifications, then import the modified package; or create a brand new package altogether and import it. + +The Kusion ecosystem can be easily expanded in this manner. + +An example of the import looks like the following: +``` +### import from the official catalog package +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c + +### import my own modified package +import my_own_catalog.models.schema.v1 as moc +import my_other_package.schema.v1.redis as myredis +``` + +Take `import catalog.models.schema.v1.workload as wl` as an example, the `.models.schema.v1.workload` part after `import catalog` represents the relative path of a specific schema to import. 
In this case, the `workload` schemas is defined under `models/schema/v1/workload` directory in the `catalog` package. + +### Understanding kcl.mod + +Much similar to the concept of `go.mod`, Kusion uses `kcl.mod` as the source of truth to manage metadata (such as package name, dependencies, etc.) for the current package. Kusion will also auto-generate a `kcl.mod.lock` as the dependency lock file. + +The most common usage for `kcl.mod` is to manage the dependency of your application configurations. + +:::info + +Please note this `kcl.mod` will be automatically generated if you are using `kusion init` to initialize a project with a template. You will only need to modify this file if you are modifying the project metadata outside the initialization process, such as upgrading the dependency version or adding a new dependency altogether, etc. +:::info + +There are 3 sections in a `kcl.mod` file: +- `package`, representing the metadata for the current package. +- `dependencies`, describing the packages the current package depends on. Supports referencing either a git repository or an OCI artifact. +- `profile`, defining the behavior for Kusion. In the example below, it describes the list of files Kusion should look for when parsing the application configuration. + +An example of `kcl.mod`: +``` +[package] +name = "multi-stack-project" +edition = "0.5.0" +version = "0.1.0" + +[dependencies] +catalog = { git = "https://github.com/KusionStack/catalog.git", tag = "0.1.0" } +# Uncomment the line below to use your own modified package +# my-package = ghcr.io/kcl-lang/my-package + +[profile] +entries = ["../base/base.k", "main.k"] +``` + +### Building Blocks + +Configuration files consist of building blocks that are made of instances of schemas. An `AppConfiguration` instance consists of several child schemas, most of which are optional. The only mandatory one is the `workload` instance. We will take a closer look in the [workload walkthrough](workload). The order of the building blocks does NOT matter. + +The major building blocks as of version `0.9.0`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container {} + ... + } + ports: [] + secrets: {} + } + database: d.Database{} + monitoring: m.Prometheus{} + opsRule: t.OpsRule {} + ... +} +``` + +We will deep dive into each one of the building blocks in this documentation series. + +### Instantiating an application + +In Kusion's out-of-the-box experience, an application is identified with an instance of `AppConfiguration`. You may have more than one application in the same project or stack. + +Here's an example of a configuration that can be consumed by Kusion (assuming it is placed inside the proper directory structure that includes project and stack configurations, with a `kcl.mod` present): + +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.workload.container as c + +gocity: ac.AppConfiguration { + workload: wl.Service { + containers: { + "gocity": c.Container { + image = "howieyuen/gocity:latest" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 4000 + } + ] + } +} +``` + +Don't worry about what `workload` or `ports` stand for at the moment. We will deep dive into each one of them in this upcoming documentation series. 
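+
+As noted above, a project or stack may contain more than one application. A second application is simply a second `AppConfiguration` instance in the same configuration code; the sketch below is hypothetical, reusing the imports from the example above with a placeholder image:
+
+```
+admin: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "admin": c.Container {
+                image: "example/admin:latest"   # placeholder image
+            }
+        }
+        replicas: 1
+    }
+}
+```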
+ +### Using `kusion init` + +Kusion offers a `kusion init` sub-command which initializes a new project using some pre-built templates, which saves you from the hassle of manually building the aforementioned directory structure that Kusion expects. + +There is a built-in template `single-stack-sample` in the Kusion binary that can be used offline. + +We also maintain a [kusion-templates repository](https://github.com/KusionStack/kusion-templates) that hosts a list of more comprehensive project scaffolds. You can access them via `kusion init --online` command which requires connectivity to `github.com`. + +The pre-built templates are meant to help you get off the ground quickly with some simple out-of-the-box examples. You can refer to the [QuickStart documentation](../getting-started/deliver-wordpress) for some step-by-step tutorials. + +### Using references + +The reference documentation for the `catalog` package is located in [Reference](../reference/modules/catalog-models/app-configuration). + +If you are using the `catalog` package out of the box, the reference documentation provides a comprehensive view for each schema involved, including all the attribute names and description, their types, default value if any, and whether a particular attribute is required or not. There will also be an example attached to each schema reference. + +We will also deep dive into some common examples in the upcoming sections. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/2-kcl-basics.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/2-kcl-basics.md new file mode 100644 index 00000000..a695bf9c --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/2-kcl-basics.md @@ -0,0 +1,144 @@ +--- +id: kcl-basics +--- + +# KCL Basics + +## Table of Content +- [Variable assignments](#variable-assignments) +- [Common built-in types](#common-built-in-types) +- [Lists and maps](#lists-and-maps) +- [Conditional statements](#conditional-statements) +- [The : and = operator](#the--and--operator) +- [Advanced KCL capabilities](#advanced-kcl-capabilities) + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +## Variable assignments + +There are two ways to initialize a variable in KCL. You can either use the `:` operator or the `=` operator. We will discuss the difference between them in [this section later](#the--and--operator). + +Here are the two ways to create a variable and initialize it: +``` +foo = "Foo" # Declare a variable named `foo` and its value is a string literal "Foo" +bar: "Bar" # Declare a variable named `bar` and its value is a string literal "Bar" +``` + +You will be able to override a variable assignment via the `=` operator. We will discuss this in depth in the [`:` and `=` operator section](#the--and--operator). + +## Common built-in types + +KCL supports `int`, `float`, `bool` and `string` as the built-in types. + +Other types are defined in the packages that are imported into the application configuration files. One such example would be the `AppConfiguration` object (or `Container`, `Probe`, `Port` object, etc) that are defined in the `catalog` repository. 
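+
+As a quick, hypothetical illustration (the variable names and values below are made up), the built-in types can be declared directly:
+
+```
+name = "my-app"          # string
+replicas = 2             # int
+cpu_utilization = 0.65   # float
+public = True            # bool
+```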
+
+## Lists and maps
+
+Lists are represented using the `[]` notation.
+An example of lists:
+```
+list0 = [1, 2, 3]
+list1 = [4, 5, 6]
+joined_list = list0 + list1 # [1, 2, 3, 4, 5, 6]
+```
+
+Maps are represented using the `{}` notation.
+An example of maps:
+```
+a = {"one" = 1, "two" = 2, "three" = 3}
+b = {'one' = 1, 'two' = 2, 'three' = 3}
+assert a == b # True
+assert len(a) == 3 # True
+```
+
+## Conditional statements
+You can also use basic control flow statements when writing the configuration file.
+
+An example that sets the value of `replicas` conditionally based on the value of `containers.myapp.resources.cpu`:
+```
+import catalog.models.schema.v1 as ac
+import catalog.models.schema.v1.workload as wl
+import catalog.models.schema.v1.workload.container as c
+
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "myapp": c.Container {
+                image: ""
+                resources: {
+                    "cpu": "500m"
+                    "memory": "512Mi"
+                }
+            }
+        }
+        replicas: 1 if containers.myapp.resources.cpu == "500m" else 2
+    }
+}
+```
+
+For more details on KCL's control flow statements, please refer to the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#control-flow-statements).
+
+## The `:` and `=` operator
+
+You might have noticed there is a mixed usage of the `:` and `=` in the samples above.
+
+:::info
+
+**TLDR: The recommendation is to use `:` in the common configurations, and `=` for override in the environment-specific configurations.**
+:::
+
+In KCL:
+- `:` represents a union-ed value assignment. In the pattern `identifier: E` or `identifier: T E`, the value of the expression `E` with optional type annotation `T` will be merged and union-ed into the element value.
+- `=` represents a value override. In the pattern `identifier = E` or `identifier = T E`, the value of the expression `E` with optional type annotation `T` will override the `identifier` attribute value.
+
+Let's take a look at an example:
+```
+# This is one configuration that will be merged.
+config: Config {
+    data.d1 = 1
+}
+# This is another configuration that will be merged.
+config: Config {
+    data.d2 = 2
+}
+```
+
+The above is equivalent to the snippet below since the two expressions for `config` get merged/union-ed into one:
+```
+config: Config {
+    data.d1 = 1
+    data.d2 = 2
+}
+```
+
+whereas using the `=` operator will result in a different outcome:
+```
+# This is the first configuration.
+config = Config {
+    data.d1 = 1
+}
+# This is the second configuration that will override the prior one.
+config = Config {
+    data.d2 = 2
+}
+```
+
+The config above results in:
+```
+config: Config {
+    data.d2 = 2
+}
+```
+
+Please note that the `:` attribute operator represents an idempotent merge operation, and an error will be thrown when the values that need to be merged conflict with each other.
+
+```
+data0 = {id: 1} | {id: 2} # Error:conflicting values between {'id': 2} and {'id': 1}
+data1 = {id: 1} | {id = 2} # Ok, the value of `data` is {"id": 2}
+```
+
+More about the `:` and `=` operators can be found in the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#config-operations).
+
+## Advanced KCL capabilities
+
+For more advanced KCL capabilities, please visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl).
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/3-base-override.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/3-base-override.md new file mode 100644 index 00000000..01cd34e1 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/3-base-override.md @@ -0,0 +1,87 @@ +--- +id: base-override +--- + +# Base and Override + +In practice, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets, be it different environments in the SDLC, or different clouds, regions or runtimes for cost/regulation/performance or disaster recovery related reasons. + +In that context, we advocate for a pattern where you can leverage some Kusion and KCL features to minimize the amount of duplicate configurations, by separating the common base application configuration and environment-specific ones. + +:::info + +The file names in the below examples don't matter as long as they are called out and appear in the correct order in the `entries` field (the field is a list) in `kcl.mod`. The files with common configurations should appear first in the list and stack-specific ones last. The latter one takes precedence. + +The configurations also don't have be placed into a single `.k` file. For complex projects, they can be broken down into smaller organized `.k` files for better readability. +::: + +Base configuration defined in `base/base.k`: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.workload.container as c + +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 80 + } + ] + } +} +``` + +Environment-specific configuration defined in `dev/main.k`: +``` +import catalog.models.schema.v1 as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + # dev stack has different app configuration from the base + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } + } + replicas = 2 + } +} +``` + +Alternatively, you could locate a specific property (in this case below, the `Container` object) in the `AppConfiguration` object using the dot selector shorthand(such as `workload.containers.myapp` or `workload.replicas` below): +``` +import catalog.models.schema.v1 as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload.replicas = 2 + workload.containers.myapp: { + # dev stack has different app configuration + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } +} +``` +This is especially useful when the application configuration is complex but the override is relatively straightforward. + +The two examples above are equivalent when overriding the base. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/4-workload.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/4-workload.md new file mode 100644 index 00000000..97dbcd4b --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/4-workload.md @@ -0,0 +1,318 @@ +# Workload + +The `workload` attribute in the `AppConfiguration` instance is used to describe the specification for the application workload. The application workload generally represents the computing component for the application. + +A `workload` maps to an `AppConfiguration` instance 1:1. If there are more than one workload, they should be considered different applications. + +## Table of Content +- [Import](#import) +- [Types of workloads](#types-of-workloads) +- [Configure containers](#configure-containers) + - [Application image](#application-image) + - [Resource Requirements](#resource-requirements) + - [Health Probes](#health-probes) + - [Lifecycle Hooks](#lifecycle-hooks) + - [Create Files](#create-files) + - [Customize container initialization](#customize-container-initialization) +- [Configure Replicas](#configure-replicas) +- [Differences between Service and Job](#differences-between-service-and-job) +- [Workload References](#workload-references) + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc +``` + +## Types of Workloads + +There are currently two types of workloads: + +- `Service`, representing a long-running, scalable workload type that should "never" go down and respond to short-lived latency-sensitive requests. This workload type is commonly used for web applications and services that expose APIs. +- `Job`, representing batch tasks that take from a few seconds to days to complete and then stop. These are commonly used for batch processing that is less sensitive to short-term performance fluctuations. + +To instantiate a `Service`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service {} +} +``` + +To instantiate a `Job`: +``` +myapp: ac.AppConfiguration { + workload: wl.Job {} +} +``` + +Of course, the `AppConfiguration` instances above is not sufficient to describe an application. We still need to provide more details in the `workload` section. + +## Configure containers + +Kusion is built on top of cloud-native philosophies. One of which is that applications should run as loosely coupled microservices on abstract and self-contained software units, such as containers. + +The `containers` attribute in a workload instance is used to define the behavior for the containers that run application workload. The `containers` attribute is a map, from the name of the container to the `catalog.models.schema.v1.workload.container.Container` Object which includes the container configurations. + +:::info + +The name of the container is in the context of the configuration file, so you could refer to it later. It's not referring to the name of the container in the Kubernetes cluster (or any other runtime). 
+::: + +Everything defined in the `containers` attribute is considered an application container, as opposed to a sidecar container. Sidecar containers will be introduced in a different attribute in a future version. + +In most of the cases, only one application container is needed. Ideally, we recommend mapping an `AppConfiguration` instance to a microservice in the microservice terminology. + +We will walk through the details of configuring a container using an example of the `Service` type. + +To add an application container: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container {} + } + } +} +``` + +### Application image + +The `image` attribute in the `Container` schema specifies the application image to run. This is the only required field in the `Container` schema. + +To specify an application image: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + } + } +} +``` + +### Resource Requirements + +The `resources` attribute in the `Container` schema specifies the application resource requirements such as cpu and memory. + +You can specify an upper limit (which maps to resource limits only) or a range as the resource requirements (which maps to resource requests and limits in Kubernetes). + +To specify an upper bound (only resource limits): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # ... + } + } + } +} +``` + +To specify a range (both resource requests and limits): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # Sets requests to cpu=250m and memory=256Mi + # Sets limits to cpu=500m and memory=512Mi + resources: { + "cpu": "250m-500m" + "memory": "256Mi-512Mi" + } + # ... + } + } + } +} +``` + +### Health Probes + +There are three types of `Probe` defined in a `Container`: + +- `livenessProbe` - used to determine if the container is healthy and running +- `readinessProbe` - used to determine if the container is ready to accept traffic +- `startupProbe` - used to determine if the container has started properly. Liveness and readiness probes don't start until `startupProbe` succeeds. Commonly used for containers that takes a while to start + +The probes are optional. You can only have one Probe of each kind for a given `Container`. + +To configure a `Http` type `readinessProbe` that probes the health via HTTP request and a `Exec` type `livenessProbe` which executes a command: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # Configure an Http type readiness probe at /healthz + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "/healthz" + } + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 15 + successThreshold: 3 + failureThreshold: 1 + } + # Configure an Exec type liveness probe that executes probe.sh + livenessProbe: p.Probe { + probeHandler: p.Exec { + command: ["probe.sh"] + } + initialDelaySeconds: 10 + } + } + } + } +} +``` + +### Lifecycle Hooks + +You can also configure lifecycle hooks that triggers in response to container lifecycle events such as liveness/startup probe failure, preemption, resource contention, etc. 
+ +There are two types that is currently supported: + +- `PreStop` - triggers before the container is terminated. +- `PostStart` - triggers after the container is initialized. + +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # Configure lifecycle hooks + lifecycle: lc.Lifecycle { + # Configures an Exec type pre-stop hook that executes preStop.sh + preStop: p.Exec { + command: ["preStop.sh"] + } + # Configures an Http type pre-stop hook at /post-start + postStart: p.Http { + url: "/post-start" + } + } + } + } + } +} +``` + +### Create Files + +You can also create files on-demand during the container initialization. + +To create a custom file and mount it to `/home/admin/my-file` when the container starts: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + # Creates a file during container startup + files: { + "/home/admin/my-file": c.FileSpec { + content: "some file contents" + mode: "0777" + } + } + } + } +} +``` + +### Customize container initialization + +You can also customize the container entrypoint via `command`, `args`, and `workingDir`. These should **most likely not be required**. In most of the cases, the entrypoint details should be baked into the application image itself. + +To customize the container entrypoint: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # This command will overwrite the entrypoint set in the image Dockerfile + command: ["/usr/local/bin/my-init-script.sh"] + # Extra arguments append to command defined above + args: [ + "--log-dir=/home/my-app/logs" + "--timeout=60s" + ] + # Run the command as defined above, in the directory "/tmp" + workingDir: "/tmp" + } + } + } +} +``` + +## Configure Replicas + +The `replicas` field in the `workload` instance describes the number of identical copies to run at the same time. It is generally recommended to have multiple replicas in production environments to eliminate any single point of failure. In Kubernetes, this corresponds to the `spec.replicas` field in the relevant workload manifests. + +To configure a workload to have a replica count of 3: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + # ... + } + replicas: 3 + # ... + } + # ... +} +``` + +## Differences between Service and Job + +The two types of workloads, namely `Service` and `Job`, share a majority of the attributes with some minor differences. + +### Exposure + +A `Service` usually represents a long-running, scalable workload that responds to short-lived latency-sensitive requests and never go down. Hence, a `Service` has an additional attribute that determines how it is exposed and can be accessed. A `Job` does NOT have the option to be exposed. We will explore more in the [application networking walkthrough](networking). + +### Job Schedule + +A `Job` can be configured to run in a recurring manner. In this case, the job will have a cron-format schedule that represents its recurring schedule. + +To configure a job to run at 21:00 every night: +``` +myapp: wl.Job { + containers: { + # ... + } + schedule: "0 21 * * *" + } +``` + +## Workload References + +You can find workload references [here](../reference/modules/catalog-models/workload/service). 
+ +You can find workload schema source [here](https://github.com/KusionStack/catalog/tree/main/models/schema/v1/workload). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/5-networking.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/5-networking.md new file mode 100644 index 00000000..528fef0d --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/5-networking.md @@ -0,0 +1,118 @@ +--- +id: networking +--- + +# Application Networking + +In addition to configuring application's [container specifications](workload#configure-containers), you can also configure its networking behaviors, including how to expose the application and how it can be accessed. + +In future versions, this will also include ingress-based routing strategy and DNS configurations. + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n +``` + +## Private vs Public Access + +Private network access means the service can only be access from within the target cluster. + +Public access is implemented using public load balancers on the cloud. This generally requires a Kubernetes cluster that is running on the cloud with a vendor-specific service controller. + +Any ports defined default to private access unless explicitly specified. + +To expose port 80 to be accessed privately: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + port: 80 + } + ] + } +} +``` + +To expose port 80 to be accessed publicly: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + port: 80 + public: True + } + ] + } +} +``` + +:::info +The CSP (Cloud Service Provider) used to provide load balancers is defined by platform engineers in workspace. +::: + +## Mapping ports + +To expose a port `80` that maps to a different port `8088` on the container: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + port: 80 + targetPort: 8088 + } + ] + } +} +``` + +## Exposing multiple ports + +You can also expose multiple ports and configure them separately. + +To expose port 80 to be accessed publicly, and port 9099 for private access (to be scraped by Prometheus, for example): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + port: 80 + public: True + } + n.Port { + port: 9099 + } + ] + } +} +``` + +## Choosing protocol + +To expose a port using the `UDP` protocol: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... 
+ ports: [ + n.Port { + port: 80 + targetPort: 8088 + protocol: "UDP" + } + ] + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/6-database.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/6-database.md new file mode 100644 index 00000000..851c8fc2 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/6-database.md @@ -0,0 +1,454 @@ +--- +id: databse +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Managed Databases + +The `database` attribute in the `AppConfiguration` instance is used to describe the specification for any databases needed for the application. + +You can currently have several databases with **different database names** for an application at the same time. + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](./1-overview.md#configuration-file-overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.accessories.mysql +import catalog.models.schema.v1.accessories.postgres +``` + +## Types of Database offerings + +As of version 0.10.0, Kusion supports the following database offerings on the cloud: +- MySQL and PostgreSQL Relational Database Service (RDS) on [AWS](https://aws.amazon.com/rds/) +- MySQL and PostgreSQL Relational Database Service (RDS) on [AliCloud](https://www.alibabacloud.com/product/databases) + +More database types on more cloud vendors will be added in the future. + +Alternatively, Kusion also supports creating a database at `localhost` for local testing needs. A local database is quicker to stand up and easier to manage. It also eliminates the need for an account and any relevant costs with the cloud providers in the case that a local testing environment is sufficient. + +:::info +You do need a local Kubernetes cluster to run the local database workloads. You can refer to [Minikube](https://minikube.sigs.k8s.io/docs/start/) or [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) to get started. +To see an end-to-end use case for standing up a local testing environment including a local database, please refer to the [Kusion Quickstart](../2-getting-started/2-deliver-wordpress.md). +::: + +## Cloud Credentials and Permissions + +Kusion provisions databases on the cloud via [terraform](https://www.terraform.io/) providers. For it to create _any_ cloud resources, it requires a set of credentials that belongs to an account that has the appropriate write access so the terraform provider can be initialized properly. + +For AWS, the environment variables needed: +``` +export AWS_ACCESS_KEY_ID="xxxxxxxxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="xxxxxxx" # replace it with your SecretKey +``` + +For AliCloud, the environment variables needed: +``` +export ALICLOUD_ACCESS_KEY="xxxxxxxxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="xxxxxxxxx" # replace it with your SecretKey +``` + +The user account that owns these credentials would need to have the proper permission policies attached to create databases and security groups. If you are using the cloud-managed policies, the policies needed to provision a database and configure firewall rules are listed below. 
+ +For AWS: +- `AmazonVPCFullAccess` for creating and managing database firewall rules via security group +- `AmazonRDSFullAccess` for creating and managing RDS instances + +For AliCloud: +- `AliyunVPCFullAccess` for creating and managing database firewall rules via security group +- `AliyunRDSFullAccess` for creating and managing RDS instances + +Alternatively, you can use customer managed policies if the cloud provider built-in policies don't meet your needs. The list of permissions needed are in the [AmazonRDSFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonRDSFullAccess.html#AmazonRDSFullAccess-json) and [AmazonVPCFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonVPCFullAccess.html). It will most likely be a subset of the permissions in the policy documents. + +## Configure Database + +### Provision a Cloud Database + +Assuming the steps in the [Cloud Credentials and Permissions](#cloud-credentials-and-permissions) section is setup properly, you can now provision cloud databases via Kusion. + +#### AWS RDS Instance +To provision an AWS RDS instance with MySQL v8.0 or PostgreSQL v14.0, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# MySQL configurations for AWS RDS +modules: + mysql: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# PostgreSQL configurations for AWS RDS +modules: + postgres: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + database: { + wordpress: mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + database: { + pgadmin: postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +It's highly recommended to replace `0.0.0.0/0` and closely manage the whitelist of IPs that can access the database for security purposes. The `0.0.0.0/0` in the example above or if `securityIPs` is omitted altogether will allow connections from anywhere which would typically be a security bad practice. + +The `instanceType` field determines the computation and memory capacity of the RDS instance. The `db.t3.micro` instance type in the example above represents the `db.t3` instance class with a size of `micro`. In the same `db.t3` instance family there are also `db.t3.small`, `db.t3.medium`, `db.t3.2xlarge`, etc. + +The full list of supported `instanceType` values can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Support). 
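+
+As a sketch (not an official recipe), scaling the instance up later only requires editing the same workspace block and re-running `kusion workspace update`; `db.t3.medium` below is an illustrative choice from the instance class list linked above:
+
+```yaml
+# Hypothetical workspace override: a larger AWS RDS instance class for MySQL
+modules:
+  mysql:
+    default:
+      cloud: aws
+      instanceType: db.t3.medium  # illustrative; any supported class works
+      # size, securityIPs, suffix, etc. remain as configured above
+```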
+ +You can also adjust the storage capacity for the database instance by changing the `size` field which is storage size measured in gigabytes. The minimum is 20. More details can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#Concepts.Storage.GeneralSSD). + +#### AliCloud RDS Instance + +To provision an Alicloud RDS instance with MySQL or PostgreSQL, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. Note that AliCloud RDS has several additional fields such as `category`, `subnetID` and `privateRouting`: + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# MySQL configurations for Alicloud RDS +modules: + mysql: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# PostgreSQL configurations for Alicloud RDS +modules: + postgres: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + database: { + wordpress: mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + database: { + pgadmin: postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +We will walkthrough `subnetID` and `privateRouting` in the [Configure Network Access](#configure-network-access) section. + +The full list of supported `instanceType` values can be found in: +- [MySQL instance types(x86)](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/primary-apsaradb-rds-for-mysql-instance-types#concept-2096487) +- [PostgreSQL instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-postgresql/primary-apsaradb-rds-for-postgresql-instance-types#concept-2096578) + +### Local Database + +To deploy a local database with MySQL v8.0 or PostgreSQL v14.0: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + database: { + wordpress: mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + database: { + pgadmin: postgres.PostgreSQL { + type: "local" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +## Database Credentials + +There is no need to manage the database credentials manually. Kusion will automatically generate a random password, set it as the credential when creating the database, and then inject the hostname, username and password into the application runtime. 
+ +You have the option to BYO (Bring Your Own) username for the database credential by specifying the `username` attribute in the `workspace.yaml`: +```yaml +modules: + mysql: + default: + # ... + username: "my_username" +``` + +You **cannot** bring your own password. The password will always be managed by Kusion automatically. + +The database credentials are injected into the environment variables of the application container. You can access them via the following env vars: +``` +# env | grep KUSION_DB +KUSION_DB_HOST_WORDPRESS_MYSQL=wordpress.xxxxxxxx.us-east-1.rds.amazonaws.com +KUSION_DB_USERNAME_WORDPRESS_MYSQL=xxxxxxxxx +KUSION_DB_PASSWORD_WORDPRESS_MYSQL=xxxxxxxxx +``` + +:::info +More details about the environment of database credentials injected by Kusion can be found at [mysql credentials and connectivity](../6-reference/2-modules/1-catalog-models/database/mysql.md#credentials-and-connectivity) and [postgres credentials and connectivity](../6-reference/2-modules/1-catalog-models/database/postgres.md#credentials-and-connectivity) +::: + +You can use these environment variables out of the box. Or most likely, your application might retrieve the connection details from a different set of environment variables. In that case, you can map the kusion environment variables to the ones expected by your application using the `$()` expression. + +This example below will assign the value of `KUSION_DB_HOST_WORDPRESS_MYSQL` into `WORDPRESS_DB_HOST`, `KUSION_DB_USERNAME_WORDPRESS_MYSQL` into `WORDPRESS_DB_USER`, likewise for `KUSION_DB_PASSWORD_WORDPRESS_MYSQL` and `WORDPRESS_DB_PASSWORD`: +``` +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3-apache" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + } + # ... + } + } + # ... + } + database: { + # ... + } +} +``` + +## Configure Network Access + +You can also optionally configure the network access to the database as part of the `AppConfiguration`. This is highly recommended because it dramatically increases the security posture of your cloud environment in the means of least privilege principle. + +The `securityIPs` field in the `Database` schema declares the list of network addresses that are allowed to access the database. The network addresses are in the [CIDR notation](https://aws.amazon.com/what-is/cidr/) and can be either a private IP range ([RFC-1918](https://datatracker.ietf.org/doc/html/rfc1918) and [RFC-6598](https://datatracker.ietf.org/doc/html/rfc6598) address) or a public one. + +If the database need to be accessed from a public location (which should most likely not be the case in a production environment), `securityIPs` need to include the public IP address of the traffic source (For instance, if the RDS database needs to be accessed from your computer). + +To configure AWS RDS to restrict network access from a VPC with a CIDR of `10.0.1.0/24` and a public IP of `103.192.227.125`: + +```yaml +modules: + mysql: + default: + cloud: aws + # ... + securityIPs: + - "10.0.1.0/24" + - "103.192.227.125/32" +``` + +Depending on the cloud provider, the default behavior of the database firewall settings may differ if omitted. + +### Subnet ID + +On AWS, you have the option to launch the RDS instance inside a specific VPC if a `subnetID` is present in the application configuration. 
By default, if `subnetID` is not provided, the RDS will be created in the default VPC for that account. However, the recommendation is to self-manage your VPCs to provider better isolation from a network security perspective. + +On AliCloud, the `subnetID` is required. The concept of subnet maps to VSwitch in AliCloud. + +To place the RDS instance into a specific VPC on Alicloud: + +```yaml +modules: + mysql: + default: + cloud: alicloud + # ... + subnetID: "subnet-xxxxxxxxxxxxxxxx" +``` + +### Private Routing + +There is an option to enforce private routing on certain cloud providers if both the workload and the database are running on the cloud. + +On AliCloud, you can set the `privateRouting` flag to `True`. The database host generated will be a private FQDN that is only resolvable and accessible from within the AliCloud VPCs. Setting `privateRouting` flag to `True` when `type` is `aws` is a no-op. + +To enforce private routing on AliCloud: + +```yaml +modules: + mysql: + default: + cloud: alicloud + # ... + privateRouting: true +``` + +Kusion will then generate a private FQDN and inject it into the application runtime as the environment variable `KUSION_DB_HOST_` for the application to use. A complete list of Kusion-managed environment variables for mysql database can be found [here](../6-reference/2-modules/1-catalog-models/database/mysql.md#credentials-and-connectivity). + +Otherwise when using the public FQDN to connect to a database from the workload, the route will depend on cloud provider's routing preference. The options are generally either: +- Travel as far as possible on the cloud provider's global backbone network, or also referred to as cold potato routing, or +- Egress as early as possible to the public Internet and re-enter the cloud provider's datacenter later, or also referred to as hot potato routing + +The prior generally has better performance but is also more expensive. + +You can find a good read on the [AWS Blog](https://aws.amazon.com/blogs/architecture/internet-routing-and-traffic-engineering/) or the [Microsoft Learn](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/routing-preference-overview). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/7-secret.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/7-secret.md new file mode 100644 index 00000000..6c3fde02 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/7-secret.md @@ -0,0 +1,245 @@ +--- +id: secret +--- + +# Secrets + +Secrets are used to store sensitive data like passwords, API keys, TLS certificates, tokens, or other credentials. Kusion provides multiple secret types, and makes it easy to be consumed in containers. + +For application dependent cloud resources that are managed by Kusion, their credentials are automatically managed by Kusion (generated and injected into application runtime environment variable). You shouldn't have to manually create those. + +## Using secrets in workload + +Secrets must be defined in AppConfiguration. The values can be generated by Kusion or reference existing secrets stored in third-party vault. Secrets can be consumed in containers by referencing them through the `secret:///` URI syntax. + +### Consume secret in an environment variable + +You can consume the data in Secrets as environment variable in your container. For example the db container uses an environment variable to set the root password. 
+ +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.container as c +import models.schema.v1.workload.secret as sec + +sampledb: ac.AppConfiguration { + workload: wl.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume db-root-password secret in environment + "ROOT_PASSWORD": "secret://db-root-password/token" + } + } + } + # Secrets used to generate token + secrets: { + "init-info": sec.Secret { + type: "token" + } + } + } +} +``` + +The example shows the secret `root-password` being consumed as an environment variable in the db container. The secret is of type token and will automatically be generated at runtime by Kusion. + +### Consume all secret keys as environment variables + +Sometimes your secret contains multiple data that need to be consumed as environment variables. The example below shows how to consume all the values in a secret as environment variables named after the keys. + +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.container as c +import models.schema.v1.workload.secret as sec + +sampledb: ac.AppConfiguration { + workload: wl.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume all init-info secret keys as environment variables + "secret://init-info": "" + } + } + } + # Secrets used to init mysql instance + secrets: { + "init-info": sec.Secret { + type: "opaque" + data: { + "ROOT_PASSWORD": "admin" + } + } + } + } +} +``` + +This will set the environment variable "ROOT_PASSWORD" to the value "admin" in the db container. + +## Types of secrets + +Kusion provides multiple types of secrets to application developers. + +1. Basic: Used to generate and/or store usernames and passwords. +2. Token: Used to generate and/or store secret strings for password. +3. Opaque: A generic secret that can store arbitrary user-defined data. +4. Certificate: Used to store a certificate and its associated key that are typically used for TLS. +5. External: Used to retrieve secret form third-party vault. + +### Basic secrets + +Basic secrets are defined in the secrets block with the type "basic". + +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "auth-info": sec.Secret { + type: "basic" + data: { + "username": "admin" + "password": "******" + } + } + } + } +} +``` + +The basic secret type is typically used for basic authentication. The key names must be username and password. If one or both of the fields are defined with a non-empty string, those values will be used. If the empty string, the default value, is used Acorn will generate random values for one or both. + +### Token secrets + +Token secrets are useful for generating a password or secure string used for passwords when the user is already known or not required. + +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "api-token": sec.Secret { + type: "token" + data: { + "token": "" + } + } + } + } +} +``` + +The token secret type must be defined. The `token` field in the data object is optional and if left empty Kusion will generate the token, which is 54 characters in length by default. If the `token` is defined that value will always be used. 
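+
+For contrast, the sketch below pins the token to an explicit (purely illustrative) value; as noted above, a user-supplied value is always used as-is instead of a generated one:
+
+```
+import models.schema.v1 as ac
+import models.schema.v1.workload as wl
+import models.schema.v1.workload.secret as sec
+
+sampleapp: ac.AppConfiguration {
+    workload: wl.Service {
+        # ...
+        secrets: {
+            "api-token": sec.Secret {
+                type: "token"
+                data: {
+                    # Illustrative fixed value; Kusion will not overwrite it
+                    "token": "s0me-fixed-example-token"
+                }
+            }
+        }
+    }
+}
+```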
+ +### Opaque secrets + +Opaque secrets have no defined structure and can have arbitrary key value pairs. + +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "my-secret": sec.Secret { + type: "opaque" + } + } + } +} +``` + +### Certificate secrets + +Certificate secrets are useful for storing a certificate and its associated key. One common use for TLS Secrets is to configure encryption in transit for an Ingress, but you can also use it with other resources or directly in your workload. + +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "server-cert": sec.Secret { + type: "certificate" + data: { + # Please do not put private keys in configuration files + "tls.crt": "The cert file content" + "tls.key": "The key file content" + } + } + } + } +} +``` + +### External secrets + +As a general principle, storing secrets in a plain text configuration file is highly discouraged, keeping secrets outside of Git is especially important for future-proofing, even encrypted secrets are not recommended to check into Git. The most common approach is to store secrets in a third-party vault (such as Hashicorp Vault, AWS Secrets Manager and Azure Key Vault, etc) and retrieve the secret in the runtime only. External secrets are used to retrieve sensitive data from external secret store to make it easy to be consumed in containers. + +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "api-access-token": sec.Secret { + type: "external" + data: { + # Please do not put private keys in configuration files + "accessToken": "ref://api-auth-info/accessToken?version=1" + } + } + } + } +} +``` + +The value field in data object follow `ref://PATH[?version=]` URI syntax. `PATH` is the provider-specific path for the secret to be retried. Kusion provides out-of-the-box integration with `Hashicorp Vault`, `AWS Secrets Manager`, `Azure Key Vault` and `Alicloud Secrets Manager`. + +## Immutable secrets + +You can also declare a secret as immutable to prevent it from being changed accidentally. + +To declare a secret as immutable: + +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "my-secret": sec.Secret { + # ... + immutable: True + } + } + } +} +``` + +You can change a secret from mutable to immutable but not the other way around. That is because the Kubelet will stop watching secrets that are immutable. As the name suggests, you can only delete and re-create immutable secrets but you cannot change them. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/8-monitoring.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/8-monitoring.md new file mode 100644 index 00000000..60a8ec29 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/8-monitoring.md @@ -0,0 +1,83 @@ +# Application Monitoring + +The `monitoring` attribute in the `AppConfiguration` instance is used to describe the specification for the collection of monitoring requirements for the application. + +As of version 0.10.0, Kusion supports integration with Prometheus by managing scraping behaviors in the configuration file. + +:::info + +The `monitoring` attribute requires the target cluster to have installed Prometheus correctly, either as a Kubernetes operator or a server/agent. + +More about how to set up Prometheus can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus) +::: + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.monitoring as m +``` + +## Workspace configurations + +In addition to the KCL configuration file, there are also workspace-level configurations that should be set first. In an ideal scenario, this step is done by the platform engineers. + +In the event that they do not exist for you or your organization, e.g. if you are an individual developer, you can either do it yourself or use the [default values](#default-values) provided by the KusionStack team. The steps to do this yourself can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus#setting-up-workspace-configs). + +:::info + +For more details on how workspaces work, please refer to the [workspace concept](../3-concepts/4-workspace.md) +::: + +By separating configurations that the developers are interested in and those that platform owners are interested in, we can reduce the cognitive complexity of the application configuration and achieve separation of concern. + +## Managing Scraping Configuration +To manage scrape configuration for the application: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + monitoring: m.Prometheus{ + path: "/metrics" + port: "web" + } +} +``` + +The example above will instruct the Prometheus job to scrape metrics from the `/metrics` endpoint of the application on the port named `web`. + +To instruct Prometheus to scrape from `/actuator/metrics` on port `9099` instead: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + monitoring: m.Prometheus{ + path: "/actuator/metrics" + port: "9099" + } +} +``` + +Note that numbered ports only work when your Prometheus is not running as an operator. + +Neither `path` and `port` are required fields if Prometheus runs as an operator. If omitted, `path` defaults to `/metrics`, and `port` defaults to the container port or service port, depending on which resource is being monitored. If Prometheus does not run as an operator, both fields are required. 
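+
+Accordingly, when Prometheus runs as an operator, a minimal monitoring block that simply relies on those defaults can be as small as the following sketch:
+
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        # ...
+    }
+    # Operator mode: path defaults to /metrics, port to the container or service port
+    monitoring: m.Prometheus {}
+}
+```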
+ +Scraping scheme, interval and timeout are considered platform-managed configurations and are therefore managed as part of the [workspace configurations](../user-guides/observability/prometheus#setting-up-workspace-configs). + +More details about how the Prometheus integration works can be found in the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md). + +## Default values + +If no workspace configurations are found, the default values provided by the KusionStack team are: +- Scraping interval defaults to 30 seconds +- Scraping timeout defaults to 15 seconds +- Scraping scheme defaults to http +- Defaults to NOT running as an operator + +If any of the default values does not meet your need, you can change them by [setting up the workspace configuration](../user-guides/observability/prometheus#setting-up-workspace-configs). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/9-operational-rules.md b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/9-operational-rules.md new file mode 100644 index 00000000..70aa3c9f --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/9-operational-rules.md @@ -0,0 +1,50 @@ +--- +id: operational-rules +--- + +# Operational Rules + +The `opsRule` attribute in the `AppConfiguration` instance is used to describe the specification for the collection of operational rule requirements for the application. Operational rules are used as a preemptive measure to police and stop any unwanted changes. + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.trait as t +``` + +## Max Unavailable Replicas + +Currently, `OpsRule` supports setting a `maxUnavailable` parameter, which specifies the maximum number of pods that can be rendered unavailable at any time. It can be either a fraction of the total pods for the current application or a fixed number. This operational rule is particularly helpful against unexpected changes or deletes to the workloads. It can also prevent too many workloads from going down during an application upgrade. + +More rules will be available in future versions of Kusion. + +To set `maxUnavailable` to a percentage of pods: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + # ... + } + } + opsRule: t.OpsRule { + maxUnavailable: "30%" + } +} +``` + +To set `maxUnavailable` to a fixed number of pods: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... 
+ } + opsRule: t.OpsRule { + maxUnavailable: 2 + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/_category_.json b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/_category_.json new file mode 100644 index 00000000..64d45678 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/4-configuration-walkthrough/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Configuration Walkthrough" +} diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/1-database.md b/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/1-database.md new file mode 100644 index 00000000..89de70b9 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/1-database.md @@ -0,0 +1,326 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deliver the WordPress Application with Cloud RDS + +This tutorial will demonstrate how to deploy a WordPress application with Kusion, which relies on both Kubernetes and IaaS resources provided by cloud vendors. We can learn how to declare the Relational Database Service (RDS) to provide a cloud-based database solution for our application from this article. + +## Prerequisites + +- Install [Kusion](../../2-getting-started/1-install-kusion.md) +- Deploy [Kubernetes](https://kubernetes.io/) or [Kind](https://kind.sigs.k8s.io/) or [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) +- Install [Terraform](https://www.terraform.io/) +- Prepare a cloud service account and create a user with `VPCFullAccess` and `RDSFullAccess` permissions to use the Relational Database Service (RDS). This kind of user can be created and managed in the Identity and Access Management (IAM) console +- The environment that executes `kusion` need to have connectivity to terraform registry to download the terraform providers + +Additionally, we also need to configure the obtained AccessKey and SecretKey as environment variables for specific cloud provider: + + + + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +```mdx-code-block + + +``` + +```bash +export ALICLOUD_ACCESS_KEY="LTAI5txxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="nxuowIxxx" # replace it with your SecretKey +``` + +![alicloud iam account](/img/docs/user_docs/getting-started/set-rds-access.png) + +```mdx-code-block + + +``` + +## Init Workspace + +To deploy the WordPress application with cloud rds, we first need to initiate a `Workspace` for the targeted stack (here we are using `dev`). Please copy the following example YAML file to your local `workspace.yaml`. 
+ + + + +`workspace.yaml` +```yaml +runtimes: + kubernetes: + kubeConfig: /etc/kubeconfig.yaml # Please replace with your own kubeconfig file path + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 + +# MySQL configurations for AWS RDS +modules: + mysql: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + suffix: "-mysql" +``` + +```mdx-code-block + + +``` + +`workspace.yaml` +```yaml +runtimes: + kubernetes: + kubeConfig: /etc/kubeconfig.yaml # Replace with your own kubeconfig file path + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing + +# MySQL configurations for Alicloud RDS +modules: + mysql: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + suffix: "-mysql" +``` + +```mdx-code-block + + +``` + +You can replace the `runtimes.kubernetes.kubeConfig` field with your own kubeconfig file path in `workspace.yaml`, and if you would like to try creating the `Alicloud` RDS instance, you should also replace the `[your-subnet-id]` of `modules.mysql.default.subnetID` field with the Alicloud `vSwitchID` to provision the database in. After that, you can execute the following command line to initiate the workspace configuration for `dev` stack. + +```shell +kusion workspace create dev -f workspace.yaml +``` + +If you already create the workspace configuration for `dev` stack, you can append the Terraform runtime configs and MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +You can use the following command lines to list and show the workspace configurations for `dev` stack. + +```shell +kusion workspace list + +kusion workspace show dev +``` + +The `workspace.yaml` is a sample configuration file for workspace management, including `Kubernetes` and `Terraform` runtime configs and `MySQL` module config. Workspace configurations are usually declared by **Platform Engineers** and will take effect through the corresponding stack. + +:::info +More details about the configuration of Workspace can be found in [Workspace Management](https://github.com/KusionStack/kusion/blob/main/docs/design/workspace_management/workspace_management.md). +::: + +## Init Project + +We can start by initializing this tutorial project with online templates: + +```shell +kusion init --online +``` + +All init templates are listed as follows: + +```shell +➜ kusion_playground kusion init --online +? Please choose a template: wordpress-cloud-rds A sample wordpress project with cloud rds +This command will walk you through creating a new kusion project. + +Enter a value or leave blank to accept the (default), and press . +Press ^C at any time to quit. + +Project Config: +? ProjectName: wordpress-cloud-rds +? AppName: wordpress +Stack Config: dev +? Image: wordpress:6.3 +Created project 'wordpress-cloud-rds' +``` + +Select `wordpress-cloud-rds` and press `Enter`. After that, we will see hints below and use the default values to config this project and stack. 
+ +![](/img/docs/user_docs/getting-started/init-wordpress-cloud-rds.gif) + +The directory structure looks like the following: + +```shell +cd wordpress-cloud-rds/dev && tree +``` + +```shell +➜ kusion_playground cd wordpress-cloud-rds/dev && tree +. +├── kcl.mod +├── main.k +└── stack.yaml + +1 directory, 3 files +``` + +:::info +More details about the directory structure can be found in [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md). +::: + +### Review Configuration Files + +Now let's take a look at the configuration files located in `dev/main.k`. + +```python +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.accessories.mysql + +# main.k declares customized configurations for dev stacks. +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image: "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 80 + } + ] + } + database: { + wordpress: mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +The configuration file `main.k`, usually written by the **App Developers**, declares customized configurations for `dev` stack, which includes an `AppConfiguration` with the name of `wordpress`. And the `wordpress` application includes a workload of type `workload.Service`, which runs on 1 replica and exposes `80` port to be accessed. Besides, it declares a cloud `mysql.MySQL` as the database accessory with the engine version of `8.0` for the application. +The necessary Terraform resources for deploying and using the cloud rds (relational database service) will be generated, and users can get the `host`, `username` and `password` of the database through the [mysql credentials and connectivity](../../6-reference/2-modules/1-catalog-models/database/mysql.md#credentials-and-connectivity) of Kusion in application containers. + +:::info +More details about Catalog models can be found in [Catalog](https://github.com/KusionStack/catalog) +::: + +:::info +The collaboration paradigm between App Developers and Platform Engineers with Kusion can be found in [Collaboration Paradigm](https://github.com/KusionStack/kusion/blob/main/docs/design/collaboration/collaboration_paradigm.md) +::: + +## Application Delivery + +You can complete the delivery of the WordPress application in the folder of `wordpress-cloud-rds/dev` using the following command line: + +```shell +kusion apply --watch +``` + + + + +![apply the wordpress application with aws rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![apply the wordpress application with alicloud rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + +``` + +After all the resources reconciled, we can port-forward our local port (e.g. 
12345) to the WordPress frontend service port (80) in the cluster: + +```shell +kubectl port-forward -n wordpress-cloud-rds svc/wordpress-cloud-rds-dev-wordpress-private 12345:80 +``` + +![kubectl port-forward for wordpress](/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png) + +## Verify WordPress Application + +Next, we will verify the WordPress site service we just delivered, along with the creation of the RDS instance it depends on. We can start using the WordPress site by accessing the link of local-forwarded port [(http://localhost:12345)](http://localhost:12345) we just configured in the browser. + +![wordpress site page](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +In addition, we can also log in to the cloud service console page to view the RDS instance we just created. + + + + +![aws rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png) + +```mdx-code-block + + +``` + +![alicloud rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png) + +```mdx-code-block + + +``` + +## Delete WordPress Application + +You can delete the WordPress application and related RDS resources using the following command line. + +```shell +kusion destroy --yes +``` + + + + +![kusion destroy wordpress with aws rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![kusion destroy wordpress with alicloud rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/2-expose-service.md b/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/2-expose-service.md new file mode 100644 index 00000000..33471449 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/2-expose-service.md @@ -0,0 +1,164 @@ +# Expose Application Service Deployed on CSP Kubernetes + +Deploying application on the Kubernetes provided by CSP (Cloud Service Provider) is convenient and reliable, which is adopted by many enterprises. Kusion has a good integration with CSP Kubernetes service. You can deploy your application to the Kubernetes cluster, and expose the service in a quite easy way. + +This tutorial demonstrates how to expose service of the application deployed on CSP Kubernetes. And the responsibilities of platform engineers and application developers are also clearly defined. In this article, *[exposing the service of nginx](https://github.com/KusionStack/konfig/blob/main/example/nginx/dev/main.k) (referred to "the example" in the below)* is given as an example. + +## Prerequisites + +Create a Kubernetes cluster, the following CSP Kubernetes services are supported. + +- [Alibaba Cloud Container Service for Kubernetes (ACK)](https://www.alibabacloud.com/product/kubernetes) +- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks). + +Get the example from the official example repository. + +```bash +git clone https://github.com/KusionStack/konfig.git && cd konfig/example/nginx +``` + +## Expose Service Publicly + +If you want the application can be accessed from outside the cluster, you should expose the service publicly. Follow the steps below, you will simply hit the goal. + +### Set up Workspace + +Create the workspace as the target where the application will be deployed to. 
The workspace is usually set up by platform engineers, which contains platform-standard and application-agnostic configurations. The workspace configurations are organized through a YAML file. + +```yaml +modules: + port: + default: + type: alicloud + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small + +runtimes: + kubernetes: + kubeconfig: "" +``` + +The YAML shown above gives an example of the workspace configuration to expose service on ACK. The file contains two top-level blocks `modules` and `runtimes`, and the block `port` under `modules`, `kubernetes` under `runtimes`. + +The block `port` contains the workspace configuration of module port, which has the following fields: + +- type: the CSP providing Kubernetes service, support `alicloud` and `aws` +- annotations: annotations attached to the service, should be a map +- labels: labels attached to the service, should be a map + +The block `kubernetes` contains the kubernetes related configuration, which has the following fields: + +- kubeconfig: the kube-config file path, which is got after creating the cluster. + +You can also configure kube-config by environment variables, which has higher priority. + +```bash +export KUBE_CONFIG="" +``` + +Then, create the workspace with the configuration file. Be attention, the workspace name must be the same as the stack name. The following command creates a workspace named `dev` with configuration file `workspace.yaml`. + +```bash +kusion workspace create prod workspace.yaml +``` + +### Write Configuration Code + +After creating workspace, you should write application configuration code, which only contains simple and application-centric configurations. This step is usually accomplished by application developers. + +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.network as n + +nginx: ac.AppConfiguration { + workload: wl.Service { + containers: { + nginx: c.Container { + image = "nginx:1.25.2" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + type: "aliyun" + port: 80 + protocol: "TCP" + public: True + } + ] + } +} +``` + +The code shown above describes how to expose service publicly on ACK. Kusion use schema `Port` to describe the network configuration, the primary fields of Port are as follows: + +- port: port number to expose service +- protocol: protocol to expose service, support `TCP` and `UDP` +- public: whether to public the service + +To public the service, you should set `public` as True. Besides, schema `Service` should be used to describe the workload configuration. + +That's all what an application developer need to configure! Next, preview and apply the configuration, the application will get deployed and exposed publicly. + +:::info +Kusion uses Load Balancer (LB) provided by the CSP to expose service publicly. For more detailed network configuration, please refer to [Application Networking](https://www.kusionstack.io/docs/kusion/configuration-walkthrough/networking) +::: + +### Preview and Apply + +Execute `kusion preview` under the stack path, you will get what will be created in the real infrastructure. The picture below gives the preview result of the example. A Namespace, Service and Deployment will be created, which meets the expectation. The service name has a suffix `public`, which shows it can be accessed publicly. 
+ +![preview-public](/img/docs/user_docs/cloud-resources/expose-service/preview-public.png) + +Then, execute `kusion apply --yes` to do the real deploying job. Just a command and a few minutes, you have accomplished deploying application and expose it publicly. + +![apply-public](/img/docs/user_docs/cloud-resources/expose-service/apply-public.png) + +### Verify Accessibility + +In the example, the kubernetes Namespace whose name is nginx, and a Service and Deployment under the Namespace should be created. Use `kubectl get` to check, the Service whose type is `LoadBalancer` and Deployment are created indeed. And the Service has `EXTERNAL-IP` 106.5.190.109, which means it can be accessed from outside the cluster. + +![k8s-resource-public](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png) + +Visit the `EXTERNAL-IP` via browser, the correct result is returned, which illustrates the servie get publicly exposed successfully. + +![result-public](/img/docs/user_docs/cloud-resources/expose-service/result-public.png) + +## Expose Service Inside Cluster + +If you only need the application can be accessed inside the cluster, just configure `Public` as False in schema `Port`. There is no need to change the workspace, which means an application developer can easily change a service exposure range, without the involvement of platform engineers. + +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n + +nginx: ac.AppConfiguration { + workload: wl.Service { + ... + ports: [ + n.Port { + ... + public: False + } + ] + } +} +``` + +Execute `kusion apply --yes`, the generated Service has suffix `private`. + +![apply-private](/img/docs/user_docs/cloud-resources/expose-service/apply-private.png) + +And the Service type is `ClusterIP`, only has `CLUSTER_IP` and no `EXTERNAL_IP`, which means it cannot get accessed from outside the cluster. + +![k8s-resource-private](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png) + +## Summary +This tutorial demonstrates how to expose service of the application deployed on the CSP Kubernetes. By platform engineers' setup of workspace, and application developers' configuration of schema Port, Kusion enables you expose service simply and efficiently. diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/_category_.json b/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/_category_.json new file mode 100644 index 00000000..f6f2c380 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/1-cloud-resources/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Cloud Resources" +} diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/1-deploy-application.md b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/1-deploy-application.md new file mode 100644 index 00000000..06d97091 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/1-deploy-application.md @@ -0,0 +1,252 @@ +# Deploy Application + +This guide shows you how to use Kusion CLIs to complete the deployment of an application running in Kubernetes. +We call the abstraction of application operation and maintenance configuration as `AppConfiguration`, and its instance as `Application`. +It is essentially a configuration model that describes an application. The complete definition can be seen [here](../../reference/modules/catalog-models/app-configuration). 
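+
+As a rough, illustrative sketch only (the real file is generated by `kusion init` later in this guide), an `Application` declared with the `catalog` models can look like this:
+
+```
+import catalog.models.schema.v1 as ac
+import catalog.models.schema.v1.workload as wl
+import catalog.models.schema.v1.workload.container as c
+import catalog.models.schema.v1.workload.network as n
+
+helloworld: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "helloworld": c.Container {
+                # Illustrative image; kusion init prompts for the actual one
+                image: "gcr.io/google-samples/gb-frontend:v4"
+            }
+        }
+        replicas: 1
+        ports: [
+            n.Port {
+                port: 80
+            }
+        ]
+    }
+}
+```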
+ +In production, the application generally includes minimally several k8s resources: + +- Namespace +- Deployment +- Service + +:::tip + +This guide requires you to have a basic understanding of Kubernetes. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- [Service](https://kubernetes.io/docs/concepts/services-networking/service/) +::: + +## Prerequisites + +Before we start, we need to complete the following steps: + +1、Install Kusion + +We recommend using HomeBrew(Mac), Scoop(Windows), or an installation shell script to download and install Kusion. +See [Download and Install](../../getting-started/install-kusion) for more details. + +2、Running Kubernetes cluster + +There must be a running Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. +If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. + +## Initializing + +This guide is to deploy an app using Kusion, relying on the Kusion CLI and an existing a Kubernetes cluster. + +## Initializing workspace configuration + +In version 0.10.0, we have introduced the new concept of [workspaces](../../concepts/workspace), which is a logical layer whose configurations represent an opinionated set of defaults, often appointed by the platform team. In most cases workspaces are represented with an "environment" in traditional SDLC terms. These workspaces provide a means to separate the concerns between the application developers who wish to focus on business logic, and a group of platform engineers who wish to standardize the applications on the platform. + +Driven by the discipline of Platform Engineering, management of the workspaces, including create/updating/deleting workspaces and their configurations should be done by dedicated platform engineers in a large software organizations to facilitate a more mature and scalable collaboration pattern. + +:::tip + +More on the collaboration pattern can be found in the [design doc](https://github.com/KusionStack/kusion/blob/main/docs/design/collaboration/collaboration_paradigm.md). +::: + +However, if that does NOT apply to your scenario, e.g. if you work in a smaller org without platform engineers or if you are an individual developer, we wish Kusion can still be a value tool to have when delivering an application. In this guide, we are NOT distinctively highlighting the different roles or what the best practices entails (the design doc above has all that) but rather the steps needed to get Kusion tool to work. + +As of version 0.10.0, workspace configurations in Kusion are managed on the local filesystem and their values are sourced from YAML files. Remotely-managed workspaces will be supported in future versions. 
+ +To initialize the workspace configuration: + +```bash +~/playground$ touch ~/dev.yaml +~/playground$ kusion workspace create dev -f ~/dev.yaml +create workspace dev successfully +``` + +To verify the workspace has been created properly: +``` +~/playground$ kusion workspace list +- dev +~/playground$ kusion workspace show dev +{} +``` + +Note that `show` command tells us the workspace configuration is currently empty, which is expected because we created the `dev` workspace with an empty YAML file. An empty workspace configuration will suffice in some cases, where no platform configurations are needed. + +We will progressively add more workspace configurations throughout this user guide. + +## Initializing application configuration + +Now that workspaces are properly initialized, we can begin by initializing the application configuration: + +```bash +kusion init +``` + +The `kusion init` command will prompt you to enter required parameters, such as project name, project description, image address, etc. +You can keep pressing _Enter_ all the way to use the default values. + +The output is similar to: + +``` +✔ single-stack-sample A minimal kusion project of single stack +This command will walk you through creating a new kusion project. + +Enter a value or leave blank to accept the (default), and press . +Press ^C at any time to quit. + +Project Config: +✔ Project Name: simple-service +✔ AppName: helloworld +✔ ProjectName: simple-service +Stack Config: dev +✔ Image: gcr.io/google-samples/gb-frontend:v4 +Created project 'simple-service' +``` + +Now, we have successfully initialized a project `simple-service` using the `single-stack-sample` template, which contains a `dev` stack. + +- `AppName` represents the name of the sample application, which is recorded in the generated `main.k` as the name of the `AppConfiguration` instance. +- `ProjectName` and `Project Name` represent the name of the sample project, which is used as the generated folder name and then recorded in the generated `project.yaml`. +- `Image` represents the image address of the application container. + +:::info +See [Project](../../concepts/project/overview) and [Stack](../../concepts/stack/overview) for more details about Project and Stack. +::: + +The directory structure is as follows: + +``` +simple-service/ +├── README.md +├── dev +│   ├── kcl.mod +│   ├── kcl.mod.lock +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 6 files +``` + +The project directory has the following files that are automatically generated: +- `README.md` contains the generated README from a template. +- `project.yaml` represents project-level configurations. +- `dev` directory stores the customized stack configuration: + - `dev/main.k` stores configurations in the `dev` stack. + - `dev/stack.yaml` stores stack-level configurations. + - `dev/kcl.mod` stores stack-level dependencies. + - `dev/kcl.mod.lock` stores version-sensitive dependencies. + +In general, the `.k` files are the KCL source code that represents the application configuration, and the `.yaml` is the static configuration file that describes behavior at the project or stack level. + +### kcl.mod +There should be a `kcl.mod` file generated automatically under the project directory. The `kcl.mod` file describes the dependency for the current project or stack. By default, it should contain a reference to the official [`catalog` repository](https://github.com/KusionStack/catalog) which holds some common model definitions that fits best practices. 
You can also create your own models library and reference that. + +## Building + +At this point, the project has been initialized with the Kusion built-in template. +The configuration is written in KCL, not JSON/YAML which Kubernetes recognizes, so it needs to be built to get the final output. + +Enter stack dir `simple-service/dev` and build: + +```bash +cd simple-service/dev && kusion build +``` + +The output is printed to `stdout` by default. You can save it to a file using the `-o/--output` flag when running `kusion build`. + +The output of `kusion build` is the [intent](../../concepts/intent) format. + +:::tip + +For instructions on the kusion command line tool, execute `kusion -h`, or refer to the tool's online [documentation](../../reference/commands). +::: + +## Applying + +Build is now completed. We can apply the configuration as the next step. In the output from `kusion build`, you can see 3 resources: + +- a Namespace named `simple-service` +- a Deployment named `simple-service-dev-helloworld` in the `simple-service` namespace +- a Service named `simple-service-dev-helloworld-private` in the `simple-service` namespace + +Execute command: + +```bash +kusion apply +``` + +The output is similar to: + +``` + ✔︎ Generating Intent in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Create +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 3 created, 0 updated, 0 deleted. +``` + +After the configuration applying successfully, you can use the `kubectl` to check the actual status of these resources. + +1、 Check Namespace + +```bash +kubectl get ns +``` + +The output is similar to: + +``` +NAME STATUS AGE +default Active 117d +simple-service Active 38s +kube-system Active 117d +... +``` + +2、Check Deployment + +```bash +kubectl get deploy -n simple-service +``` + +The output is similar to: + +``` +NAME READY UP-TO-DATE AVAILABLE AGE +simple-service-dev-helloworld 1/1 1 1 59s +``` + +3、Check Service + +```bash +kubectl get svc -n simple-service +``` + +The output is similar to: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +simple-service-dev-helloworld-private ClusterIP 10.98.89.104 80/TCP 79s +``` + +4、Validate app + +Using the `kubectl` tool, forward native port `30000` to the service port `80`. 
+ +```bash +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:80 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000): + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/2-container.md b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/2-container.md new file mode 100644 index 00000000..9b8e4c8f --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/2-container.md @@ -0,0 +1,136 @@ +# Configure Containers + +You can manage container-level configurations in the `AppConfiguration` model via the `containers` field (under the `workload` schemas). By default, everything defined in the `containers` field will be treated as application containers. Sidecar containers will be supported in a future version of kusion. + +For the full `Container` schema reference, please see [here](../../reference/modules/catalog-models/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the last guide, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/service). 
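+
+To double-check that the update has taken effect, you can print the workspace configuration again; assuming the `dev.yaml` above was applied, the output should echo it back (a sketch — the exact formatting may vary by version):
+
+```
+$ kusion workspace show dev
+modules:
+  service:
+    default:
+      replicas: 3
+      labels:
+        label-key: label-value
+      annotations:
+        annotation-key: annotation-value
+      type: CollaSet
+```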
+ +## Example +`simple-service/dev/main.k`: +```py +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.network as n + +helloworld: ac.AppConfiguration { + workload: wl.Service { + containers: { + "helloworld": c.Container { + image: "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512M" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + } + ] + } +} +``` + +## Apply + +Re-run steps in [Applying](deploy-application#applying), new container configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Intent in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the container (in the deployment template) now has the updated attributes as defined in the container configuration: +``` +$ kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + - name: env1 + value: VALUE + - name: env2 + value: VALUE2 + image: gcr.io/google-samples/gb-frontend:v4 + imagePullPolicy: IfNotPresent + name: helloworld + readinessProbe: + failureThreshold: 3 + httpGet: + host: localhost + path: / + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 500m + memory: 512M +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/3-service.md b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/3-service.md new file mode 100644 index 00000000..36b07a13 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/3-service.md @@ -0,0 +1,127 @@ +# Expose Service + +You can determine how to expose your service in the `AppConfiguration` model via the `ports` field (under the `workload` schemas). The `ports` field defines a list of all the `Port`s you want to expose for the application (and their corresponding listening ports on the container, if they don't match the service ports), so that it can be consumed by other applications. + +Unless explicitly defined, each of the ports exposed is by default exposed privately as a `ClusterIP` type service. You can expose a port publicly by specifying the `exposeInternet` field in the `Port` schema. 
At the moment, the implementation for publicly access is done via Load Balancer type service backed by cloud providers. Ingress will be supported in a future version of kusion. + +For the `Port` schema reference, please see [here](../../reference/modules/catalog-models/workload/service#schema-port) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the services to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: +```yaml +modules: + port: + default: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +The workspace configuration need to be updated with the command: +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/networking/port). + +## Example + +`simple-service/dev/main.k`: +```py +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.network as n + +helloworld: ac.AppConfiguration { + workload: wl.Service { + containers: { + "helloworld": c.Container { + image: "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512M" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 8080 + targetPort: 80 + } + ] + } +} +``` + +The code above changes the service port to expose from `80` in the last guide to `8080`, but still targeting the container port `80` because that's what the application is listening on. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), new service configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Intent in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Update +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld UnChanged + + +? Do you want to apply these diffs? yes +Start applying diffs ... 
+ SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Update v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip +UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip [3/3] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation +We can verify the Kubernetes service now has the updated attributes (mapping service port 8080 to container port 80) as defined in the `ports` configuration: +``` +kubectl get svc -n simple-service -o yaml +... + spec: + ... + ports: + - name: simple-service-dev-helloworld-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 80 +... +``` + +Exposing service port 8080: +``` +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:8080 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000), the application should be up and running: + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/4-image-upgrade.md b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/4-image-upgrade.md new file mode 100644 index 00000000..5521373c --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/4-image-upgrade.md @@ -0,0 +1,72 @@ +# Upgrade Image + +You can declare the application's container image via `image` field of the `Container` schema. + +For the full `Container` schema reference, please see [here](../../reference/modules/catalog-models/workload/service#schema-container) for more details. + +## Pre-requisite +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the image value in `simple-service/dev/main.k`: +```py +import catalog.models.schema.v1 as ac + +helloworld: ac.AppConfiguration { + workload.containers.nginx: { + ... + # before: + # image = "gcr.io/google-samples/gb-frontend:v4" + # after: + image = "gcr.io/google-samples/gb-frontend:v5" + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), update image is completed. + +``` +$ kusion apply + ✔︎ Generating Intent in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... 
+ SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation +We can verify the application container (in the deployment template) now has the updated image (v5) as defined in the container configuration: +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/5-resource-spec.md b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/5-resource-spec.md new file mode 100644 index 00000000..1d599165 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/5-resource-spec.md @@ -0,0 +1,82 @@ +# Configure Resource Specification + +You can manage container-level resource specification in the `AppConfiguration` model via the `resources` field (under the `Container` schema). + +For the full `Container` schema reference, please see [here](../../reference/modules/catalog-models/workload/service#schema-container) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example +Update the resources value in `simple-service/dev/main.k`: +```py +import catalog.models.schema.v1 as ac + +helloworld: ac.AppConfiguration { + workload.containers.helloworld: { + ... + # before: + # resources: { + # "cpu": "500m" + # "memory": "512M" + # } + # after: + resources: { + "cpu": "250m" + "memory": "256Mi" + } + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +``` +$ kusion apply + ✔︎ Generating Intent in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... 
+ SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation +We can verify the application container (in the deployment template) now has the updated resources attributes (cpu:250m, memory:256Mi) as defined in the container configuration: +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... + resources: + limits: + cpu: 250m + memory: 256Mi +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md new file mode 100644 index 00000000..65b4a6fd --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md @@ -0,0 +1,94 @@ +# Set up Operational Rules + +You can set up operational rules in the `AppConfiguration` model via the `opsRule` field and corresponding platform configurations in the workspace directory. The `opsRule` is the collection of operational rule requirements for the application that are used as a preemptive measure to police and stop any unwanted changes. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the opsRule to standardize the behavior of applications, you can do so by updating the `~/dev.yaml`: +```yaml +modules: + opsRule: + default: + maxUnavailable: "40%" +``` + +Please note that the `maxUnavailable` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. 
+ +The workspace configuration need to be updated with the command: +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +:::info +If the platform engineers have set the default workload to [Kusion Operation](https://github.com/KusionStack/operating) and installed the Kusion Operation controllers properly, the `opsRules` module will generate a [PodTransitionRule](https://www.kusionstack.io/docs/operating/manuals/podtransitionrule) instead of updating the `maxUnavailable` value in the deployment +::: + +## Example + +Add the `opsRule` snippet to the `AppConfiguration` in `simple-service/dev/main.k`: + +```py +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.trait as t + +helloworld: ac.AppConfiguration { + workload: wl.Service { + ... + } + # Configure the maxUnavailable rule + opsRule = t.OpsRule { + maxUnavailable: "30%" + } +} +``` + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +``` +$ kusion apply +✔︎ Generating Intent in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the application deployment strategy now has the updated attributes `maxUnavailable: 30%` in the container configuration: + +```shell +kubectl get deployment -n simple-service -o yaml +... +apiVersion: apps/v1 + kind: Deployment +... + spec: + strategy: + rollingUpdate: + maxUnavailable: 30% + type: RollingUpdate + +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/7-job.md b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/7-job.md new file mode 100644 index 00000000..c55d2245 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/7-job.md @@ -0,0 +1,123 @@ +# Schedule a Job + +The guides above provide examples on how to configure workloads of the type `wl.Service`, which is typically used for long-running web applications that should "never" go down. Alternatively, you could also schedule another kind of workload profile, namely `wl.Job` which corresponds to a one-off or recurring execution of tasks that run to completion and then stop. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for scheduling a job. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. 
+ +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/job). + +## Example + +To schedule a job with cron expression, update `simple-service/dev/main.k` to the following: + +`simple-service/dev/main.k`: +```py +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c + +helloworld: ac.AppConfiguration { + workload: wl.Job { + containers: { + "busybox": c.Container { + # The target image + image: "busybox:1.28" + # Run the following command as defined + command: ["/bin/sh", "-c", "echo hello"] + } + } + # Run every minute. + schedule: "* * * * *" + } +} +``` + +The KCL snippet above schedules a job. Alternatively, if you want a one-time job without cron, simply remove the `schedule` from the configuration. + +You can find the full example in here in the [konfig repo](https://github.com/KusionStack/konfig/tree/main/example/simple-job). + +## Applying + +Re-run steps in [Applying](deploy-application#applying) and schedule the job. Your output might look like one of the following: + +If you are starting from scratch, all resources are created on the spot: +``` +$ kusion apply + ✔︎ Generating Intent in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* └─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create batch/v1:CronJob:simple-service:helloworld-dev-helloworld success +Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success [2/2] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 2 created, 0 updated, 0 deleted. +``` + +If you are starting from the last guide which configures an `opsRule`, the output looks like the following which destroys the `Deployment` and `Service` and replace it with a `CronJob`: +``` +$ kusion apply + ✔︎ Generating Intent in the Stack dev... 
+Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create +* ├─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Delete +* └─ v1:Service:simple-service:simple-service-dev-helloworld-private Delete + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Delete apps/v1:Deployment:simple-service:simple-service-dev-helloworld success + SUCCESS Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success + SUCCESS Delete v1:Service:simple-service:simple-service-dev-helloworld-private success +Delete v1:Service:simple-service:simple-service-dev-helloworld-private success [4/4] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 1 created, 0 updated, 2 deleted. +``` + +## Validation + +We can verify the job has now been scheduled: + +```shell +$ kubectl get cronjob -n simple-service +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +simple-service-dev-helloworld * * * * * False 0 2m18s +``` + +Verify the job has been triggered after the minute mark since we scheduled it to run every minute: +```shell +$ kubectl get job -n simple-service +NAME COMPLETIONS DURATION AGE +simple-service-dev-helloworld-28415748 1/1 5s 11s +``` diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/_category_.json b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/_category_.json new file mode 100644 index 00000000..79d3c6c5 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/2-working-with-k8s/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kubernetes" +} diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/3-observability/1-prometheus.md b/docs_versioned_docs/version-v0.10/5-user-guides/3-observability/1-prometheus.md new file mode 100644 index 00000000..f8349ea1 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/3-observability/1-prometheus.md @@ -0,0 +1,304 @@ +# Configure Monitoring Behavior With Prometheus + +This document provides the step-by-step instruction to set up monitoring for your application. + +As of today, Kusion supports the configuration of Prometheus scraping behaviors for the target application. In the future, we will add more cloud-provider-native solutions, such as AWS CloudWatch, Azure Monitor, etc. + +The user guide below is composed of the following components: + +- Namespace +- Deployment +- Service +- ServiceMonitor + +:::tip + +This guide requires you to have a basic understanding of Kubernetes and Prometheus. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/) +::: + +## Pre-requisite +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +## Setting up your own Prometheus + +There a quite a few ways to set up Prometheus in your cluster: +1. 
Installing a Prometheus operator +2. Installing a standalone Prometheus server +3. Installing a Prometheus agent and connect to a remote Prometheus server + +[The advice from the Prometheus team](https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-401092041) is to use the `ServiceMonitor` or `PodMonitor` CRs via the Prometheus operator to manage scrape configs going forward[2]. + +In either case, you only have to do this setup once per cluster. This doc will use a minikube cluster and Prometheus operator as an example. + +### Installing Prometheus operator[3]. +To get the example in this user guide working, all you need is a running Prometheus operator. You can have that installed by running: +``` +LATEST=$(curl -s https://api.github.com/repos/prometheus-operator/prometheus-operator/releases/latest | jq -cr .tag_name) +curl -sL https://github.com/prometheus-operator/prometheus-operator/releases/download/${LATEST}/bundle.yaml | kubectl create -f - +``` + +This will install all the necessary CRDs and the Prometheus operator itself in the default namespace. Wait a few minutes, you can confirm the operator is up by running: +``` +kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator -n default +``` + +### Make sure RBAC is properly set up +If you have RBAC enabled on the cluster, the following must be created for Prometheus to work properly: +``` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: default +``` + +### Configure Prometheus instance via the operator +Once all of the above is set up, you can then configure the Prometheus instance via the operator: +``` +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: prometheus +spec: + serviceAccountName: prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + resources: + requests: + memory: 400Mi +``` +This Prometheus instance above will be cluster-wide, picking up ALL the service monitors and pod monitors across ALL the namespaces. +You can adjust the requests and limits accordingly if you have a larger cluster. 
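+
+For example, assuming you saved the two manifests above as `prometheus-rbac.yaml` and `prometheus.yaml` (the file names are just a suggestion), applying them and waiting for the instance to come up might look like this:
+
+```
+kubectl apply -f prometheus-rbac.yaml
+kubectl apply -f prometheus.yaml
+
+# The operator creates a StatefulSet for the instance; its pods are usually named prometheus-<name>-<n>
+kubectl get pods -n default -w
+```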
+ +### Exposing the Prometheus portal (optional) +Once you have the managed Prometheus instance created via the Prometheus CR above, you should be able to see a service created called `prometheus-operated`: + +![prometheus-operated](/img/docs/user_docs/guides/prometheus/prometheus-operated.png) + +If you are also running on minikube, you can expose it onto your localhost via kubectl: +``` +kubectl port-forward svc/prometheus-operated 9099:9090 +``` + +You should then be able to see the Prometheus portal via `localhost:9099` in your browser: + +![prometheus-portal](/img/docs/user_docs/guides/prometheus/prometheus-portal.png) + +If you are running a non-local cluster, you can try to expose it via another way, through an ingress controller for example. + +## Setting up workspace configs + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. + +In the case of setting up Prometheus, there are a few things to set up on the workspace level: + +### Operator mode + +The `operatorMode` flag indicates to Kusion whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages. + +To see more about different ways to run Prometheus in the Kubernetes cluster, please refer to the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md#prometheus-installation). + +Most cloud vendors provide an out-of-the-box monitoring solutions for workloads running in a managed-Kubernetes cluster (EKS, AKS, etc), such as AWS CloudWatch, Azure Monitor, etc. These solutions mostly involve installing an agent (CloudWatch Agent, OMS Agent, etc) in the cluster and collecting the metrics to a centralized monitoring server. In those cases, you don't need to set `operatorMode` to `True`. It only needs to be set to `True` when you have an installation of the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) running inside the Kubernetes cluster. + +:::info + +For differences between [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) and the [community kube-prometheus-stack helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), the details are documented [here](https://github.com/prometheus-operator/prometheus-operator#prometheus-operator-vs-kube-prometheus-vs-community-helm-chart). +::: + +### Monitor types + +The `monitorType` flag indicates the kind of monitor Kusion will create. It only applies when `operatorMode` is set to `True`. As of version 0.10.0, Kusion provides options to scrape metrics from either the application pods or its corresponding Kubernetes services. This determines the different kinds of resources Kusion manages when Prometheus runs as an operator in the target cluster. + +A sample `workspace.yaml` with Prometheus settings: +``` +modules: + ... + monitoring: + default: + operatorMode: True + monitorType: Service + scheme: http + interval: 30s + timeout: 15s +... +``` + +To instruct Prometheus to scrape from pod targets instead: +``` +modules: + ... + monitoring: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s +... 
+``` + +If the `prometheus` section is missing from the `workspace.yaml`, Kusion defaults `operatorMode` to false. + +### Overriding with projectSelector + +Workspace configurations contain a set of default setting group for all projects in the workspace, with means to override them by Projects using a `projectSelector` keyword. + +Projects with the name matching those in projectSelector will use the values defined in that override group instead of the default. If a key is not present in the override group, the default value will be used. + +Take a look at the sample `workspace.yaml`: +``` +modules: + ... + monitoring: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + projectSelector: + - foobar + high_frequency: + monitorType: Service + projectSelector: + - helloworld +... +``` + +In the example above, a project with the name `helloworld` will have the monitoring settings where `operatorMode` is set to `False`, a 2 minute scraping interval, 15 seconds timeout (coming from default) and http scheme (coming from default). + +You cannot have the same project appear in two projectSelectors. + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/monitoring/prometheus). + +## Updating the workspace config + +Assuming you now have a `workspace.yaml` that looks like the following: +``` +modules: + monitoring: + default: + operatorMode: True + monitorType: Service + scheme: http + interval: 30s + timeout: 15s +... +``` + +Update the workspace configuration by running the following command: +``` +kusion workspace update dev -f workspace.yaml +``` +Verify the workspace config is properly updated by running the command: +``` +kusion workspace show dev +``` + +## Using kusion to deploy your application with monitoring requirements + +At this point we are set up for good! Any new applications you deploy via kusion will now automatically have the monitoring-related resources created, should you declare you want it via the `monitoring` field in the `AppConfiguration` model. + +The monitoring in an AppConfiguration is declared in the `monitoring` field. See the example below for a full, deployable AppConfiguration. + +Please note we are using a new image `quay.io/brancz/prometheus-example-app` since the app itself need to expose metrics for Prometheus to scrape: + +`helloworld/dev/main.k`: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.monitoring as m +import catalog.models.schema.v1.workload.network as n + +helloworld: ac.AppConfiguration { + workload: wl.Service { + containers: { + "monitoring-sample-app": c.Container { + image: "quay.io/brancz/prometheus-example-app:v0.3.0" + } + } + ports: [ + n.Port { + port: 8080 + } + ] + } + monitoring: m.Prometheus{ + path: "/metrics" + } +} +``` + +The KCL file above represents an application with a service type workload, exposing the port 8080, and would like Prometheus to scrape the `/metrics` endpoint every 2 minutes. 
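+
+For reference, the `ServiceMonitor` generated in this mode would look roughly like the sketch below. The names, labels and selector here are purely illustrative and the exact resource Kusion renders may differ; the scrape interval and timeout come from the workspace configuration (30s/15s in the `workspace.yaml` above) rather than from the application:
+
+```
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: helloworld-service-monitor    # illustrative name
+  namespace: helloworld
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: helloworld    # must match the labels on the generated Service
+  endpoints:
+    - path: /metrics
+      scheme: http
+      interval: 30s
+```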
+ +Running `kusion apply` would show that kusion will create a `Namespace`, a `Deployment`, a `Service` and a `ServiceMonitor`: +![kusion-apply-with-monitor](/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png) + +Continue applying all resources: +![kusion-apply-success](/img/docs/user_docs/guides/prometheus/kusion-apply-success.png) + +If we want to, we can verify the service monitor has been created successfully: +![service-monitor](/img/docs/user_docs/guides/prometheus/service-monitor.png) + +In a few seconds, you should be able to see in the Prometheus portal that the service we just deployed has now been discovered and monitored by Prometheus: +![prometheus-targets](/img/docs/user_docs/guides/prometheus/prometheus-targets.png) + +You can run a few simply queries for the data that Prometheus scraped from your application: +![prometheus-simple-query](/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png) + +For more info about PromQL, you can find them [here](https://prometheus.io/docs/prometheus/latest/querying/basics/)[4]. + +## References +1. Prometheus: https://prometheus.io/docs/introduction/overview/ +2. Prometheus team advise: https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-446691500 +3. Prometheus operator getting started doc: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md +4. PromQL basics: https://prometheus.io/docs/prometheus/latest/querying/basics/ \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/3-observability/_category_.json b/docs_versioned_docs/version-v0.10/5-user-guides/3-observability/_category_.json new file mode 100644 index 00000000..b061ae3e --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/3-observability/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Automated Observability" +} diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/4-github-actions/1-deploy-application-via-github-actions.md b/docs_versioned_docs/version-v0.10/5-user-guides/4-github-actions/1-deploy-application-via-github-actions.md new file mode 100644 index 00000000..82a3cedb --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/4-github-actions/1-deploy-application-via-github-actions.md @@ -0,0 +1,131 @@ +# Deploy Application Securely and Efficiently via GitHub Actions + +This document provides the instruction to deploy your application securely and efficiently via GitHub Actions. + +Using git repository is a very reliable and common way to manage code, and the same goes for Kusion-managed configuration code. [GitHub Actions](https://docs.github.com/en/actions) is a CI/CD platform. By customizing [GitHub Actions workflow](https://docs.github.com/en/actions/using-workflows/about-workflows), the pipeline such as building, testing, and deploying will be executed automatically. + +Kusion has a commendable integration with GitHub Actions. You can use GitHub Actions to test configuration correctness, preview change, and deploy application. This tutorial demonstrates how to deploy and operate an application through GitHub Actions. + +## GitHub Actions Workflow + +[KusionStack/konfig](https://github.com/KusionStack/konfig) is the official example repository, and provides the GitHub Actions workflow [*deploy*](https://github.com/KusionStack/konfig/blob/main/.github/workflows/deploy/deploy.yml). 
The workflow is triggered by a push on the main branch and includes multiple jobs, which ensure the reliability of the configuration code and deploy the changed applications.
+
+![workflow](/img/docs/user_docs/guides/github-actions/workflow.png)
+
+The workflow to deploy an application is shown above and includes the following jobs:
+
+- Get changed project and stack
+- Check project and stack structure
+- Test code correctness
+- Preview changed stack
+- Apply changed stack
+
+These jobs ensure the security and efficiency of the application deployment. Next, this tutorial introduces the usage and function of each job. To show how they work more concretely, updating the port configuration in the file `example/service-multi-stack/base/base.k` of [*service-multi-stack*](https://github.com/KusionStack/konfig/tree/main/example/service-multi-stack) (referred to as "the example" below) is used as the running example.
+
+## Get Changed Project and Stack
+
+As Kusion organizes code by project and stack, the first step towards deploying the affected applications is to analyze which projects and stacks have changed.
+
+The job **get-changed-project-stack** accomplishes this analysis. The main steps are as follows:
+
+- Obtain the list of changed files through `git diff`;
+- Based on the changed file list, obtain the changed projects and stacks, which are indicated by `project.yaml` and `stack.yaml` respectively.
+
+The example changes the file `example/service-multi-stack/base/base.k`, so the affected project is `example/service-multi-stack`, and the affected stacks are `example/service-multi-stack/dev` and `example/service-multi-stack/prod`. The result shown below meets this expectation.
+
+![changed-project-stack](/img/docs/user_docs/guides/github-actions/kusion-changed-project-stack.png)
+
+## Check Project and Stack Structure
+
+The job **check-structure** guarantees that the structure of the changed projects and stacks is legal, so that the Kusion CLI tools can be used correctly. The check items are as follows:
+
+- The field `name` is required in project.yaml;
+- The field `name` is required in stack.yaml.
+
+If the structure check succeeds, the structure is correct. A [pytest](https://docs.pytest.org/en/7.3.x/) report `check-structure-report` is also generated, and you can download it from [GitHub Actions Artifacts](https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts).
+
+The example passes the directory structure verification. The report makes clear that the changed project and stacks have been checked, and the result is a pass.
+
+```xml
+
+
+
+
+
+
+```
+
+## Test Code Correctness
+
+Besides a valid structure, the code must have correct syntax and semantics, and the job **test-correctness** ensures this correctness. `kusion build` is executed on the changed stacks. If it succeeds, there are no syntax errors; otherwise the configuration code is invalid and the subsequent application deployment would fail.
+
+This job checks not only the correctness of the AppConfiguration, but also of the workspace configuration. Hence, you should prepare the workspace configuration in advance. Currently, the job **test-correctness** expects the workspace configuration files to be placed under the directory `workspaces`, with the file name prefix matching the workspace name and the suffix `.yaml`. For example, if you have two workspaces named `dev` and `prod`, you should provide the files `workspaces/dev.yaml` and `workspaces/prod.yaml` with the corresponding workspace configuration.
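+
+For example, a repository layout that carries both the configuration code and the workspace files might look like this (illustrative only):
+
+```
+konfig/
+├── .github/workflows/deploy/deploy.yml
+├── example/
+│   └── service-multi-stack/
+│       ├── project.yaml
+│       ├── base/
+│       │   └── base.k
+│       ├── dev/
+│       └── prod/
+└── workspaces/
+    ├── dev.yaml
+    └── prod.yaml
+```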
+
+:::info
+The jobs preview and apply also ask for the workspace configuration files.
+
+Putting the AppConfiguration and workspace configuration in one repository is not an ideal practice; it is done here only as a simple illustration. You can organize them differently in real production practice, and you can find more information on [AppConfiguration](../../concepts/app-configuration) and [workspace](../../concepts/workspace).
+:::
+
+A report named `test-correctness-report` is generated.
+
+The example passes the code correctness test. The report shows that the tested stacks are `example/service-multi-stack/dev` and `example/service-multi-stack/prod`, and the result is a pass.
+
+```xml
+
+
+
+
+
+```
+
+## Preview Changed Stack
+
+After the above jobs have passed, the safety of the configuration change is assured, and it's time to deploy your application. Before applying the change to the real infrastructure, it's useful to see the expected result of the deployment. The job **preview** calls `kusion preview` to get the expected change result, and the result is uploaded to the artifact `preview-report`. If the result meets your expectations, you can move on to the next job and deploy the application.
+
+The example changes the stacks `example/service-multi-stack/dev` and `example/service-multi-stack/prod`. The following output shows the preview result of `example/service-multi-stack/prod`: a Kubernetes Namespace, Service and Deployment will be created once `kusion apply` is called.
+
+```
+Generating Intent in the Stack prod...
+cloning 'https://github.com/KusionStack/catalog.git' with tag '0.1.2'
+
+Stack: prod                                                                ID                                                                              Action
+* ├─                                                     v1:Namespace:service-multi-stack                                                                 Create
+* ├─                      v1:Service:service-multi-stack:service-multi-stack-prod-echoserver-public                                                       Create
+* └─                      apps/v1:Deployment:service-multi-stack:service-multi-stack-prod-echoserver                                                      Create
+```
+
+## Apply Changed Stack
+Finally, we arrive at the last step: deploying the application. The job **apply** calls `kusion apply` to apply the configuration change to the real infrastructure. If the job succeeds, the result is uploaded to the artifact `apply-report`.
+
+For the stack `example/service-multi-stack/prod` in the example, the resources are handled as planned: the Service and Deployment are created, and the Namespace, which already exists, is left unchanged.
+
+```
+Generating Intent in the Stack prod...
+cloning 'https://github.com/KusionStack/catalog.git' with tag '0.1.2'
+
+Stack: prod                                                                ID                                                                              Action
+* ├─                                                     v1:Namespace:service-multi-stack                                                                 UnChanged
+* ├─                      v1:Service:service-multi-stack:service-multi-stack-prod-echoserver-public                                                       Create
+* └─                      apps/v1:Deployment:service-multi-stack:service-multi-stack-prod-echoserver                                                      Create
+
+Start applying diffs ...
+
+SUCCESS: UnChanged v1:Namespace:service-multi-stack, skip
+SUCCESS: Create v1:Service:service-multi-stack:service-multi-stack-prod-echoserver-public success
+SUCCESS: Create apps/v1:Deployment:service-multi-stack:service-multi-stack-prod-echoserver success
+
+Apply complete! Resources: 2 created, 0 updated, 0 deleted.
+```
+
+## Summary
+This tutorial demonstrates how Kusion integrates with GitHub Actions to deploy an application. Through structure checks, correctness tests, preview and apply, Kusion with GitHub Actions enables you to deploy applications efficiently and securely.
diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/4-github-actions/_category_.json b/docs_versioned_docs/version-v0.10/5-user-guides/4-github-actions/_category_.json new file mode 100644 index 00000000..b099de48 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/4-github-actions/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "GitHub Actions" +} diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/5-secrets-management/1-using-cloud-secrets.md b/docs_versioned_docs/version-v0.10/5-user-guides/5-secrets-management/1-using-cloud-secrets.md new file mode 100644 index 00000000..29b474cd --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/5-secrets-management/1-using-cloud-secrets.md @@ -0,0 +1,93 @@ +# Using Cloud Secrets Manager + +Applications usually store sensitive data in secrets by using centralized secrets management solutions. For example, you authenticate databases, services, and external systems with passwords, API keys, tokens, and other credentials stored in a secret store, e.g. Hashicorp Vault, AWS Secrets Manager, Azure Key Vault, etc + +Kusion provides out-of-the-box support to reference existing external secrets management solution, this tutorial introduces that how to pull the secret from AWS Secrets Manager to make it available to applications. + +## Prerequisites + +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +Additionally, you also need to configure the obtained AccessKey and SecretKey as environment variables: + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +## Setting up workspace + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. + +In the case of setting up cloud secrets manager, platform teams need to specify which secrets management solution to use and necessary information to access on the workspace level. + +A sample `workspace.yaml` with AWS Secrets Manager settings: + +``` +modules: + ... +secretStore: + provider: + aws: + region: us-east-1 + profile: The optional profile to be used to interact with AWS Secrets Manager. +... +``` + +## Update AppConfiguration + +At this point we are set up for good! Now you can declare external type of secrets via the `secrets` field in the `AppConfiguration` model to consume sensitive data stored in AWS Secrets Manager. + +See the example below for a full, deployable AppConfiguration. 
+ +``` +import models.schema.v1 as ac +import models.schema.v1.workload as wl +import models.schema.v1.workload.container as c +import models.schema.v1.workload.secret as sec + +gitsync: ac.AppConfiguration { + workload: wl.Service { + containers: { + "syncer": c.Container { + image: "dyrnq/git-sync" + # Run the following command as defined + command: [ + "--repo=https://github.com/KusionStack/kusion" + "--ref=HEAD" + "--root=/mnt/git" + ] + # Consume secrets in environment variables + env: { + "GIT_SYNC_USERNAME": "secret://git-auth/username" + "GIT_SYNC_PASSWORD": "secret://git-auth/password" + } + } + } + # Secrets used to retrieve secret data from AWS Secrets Manager + secrets: { + "git-auth": sec.Secret { + type: "external" + data: { + "username": "ref://git-auth-info/username" + "password": "ref://git-auth-info/password" + } + } + } + } +} +``` + +## Apply and Verify + +Run `kusion apply` command to deploy above application, then use the below command to verify if the secret got deployed: + +``` +kubectl get secret -n secretdemo +``` + +You will find `git-auth` of type Opaque automatically created and contains sensitive information pulled from AWS Secrets Manager. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/5-secrets-management/_category_.json b/docs_versioned_docs/version-v0.10/5-user-guides/5-secrets-management/_category_.json new file mode 100644 index 00000000..8990c11b --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/5-secrets-management/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Secrets Management" +} diff --git a/docs_versioned_docs/version-v0.10/5-user-guides/_category_.json b/docs_versioned_docs/version-v0.10/5-user-guides/_category_.json new file mode 100644 index 00000000..abf4c874 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/5-user-guides/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guides" +} diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/_category_.json b/docs_versioned_docs/version-v0.10/6-reference/1-commands/_category_.json new file mode 100644 index 00000000..d783ca2e --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Commands" +} diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/index.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/index.md new file mode 100644 index 00000000..46cb36e5 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/index.md @@ -0,0 +1,32 @@ +# Kusion Commands + +Kusion is the Platform Orchestrator of KusionStack + +### Synopsis + +As a Platform Orchestrator, Kusion delivers user intentions to Kubernetes, Clouds and On-Premise resources. Also enables asynchronous cooperation between the development and the platform team and drives the separation of concerns. + +``` +kusion [flags] +``` + +### Options + +``` + -h, --help help for kusion + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion apply](kusion-apply.md) - Apply the operational intent of various resources to multiple runtimes +* [kusion build](kusion-build.md) - Build Kusion modules in a Stack to the Intent +* [kusion compile](kusion-compile.md) - Deprecated: Use 'kusion build' to generate the Intent instead +* [kusion destroy](kusion-destroy.md) - Destroy resources within the stack. +* [kusion init](kusion-init.md) - Initialize the scaffolding for a project +* [kusion preview](kusion-preview.md) - Preview a series of resource changes within the stack +* [kusion version](kusion-version.md) - Print the Kusion version information for the current context +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-apply.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-apply.md new file mode 100644 index 00000000..8503cc10 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-apply.md @@ -0,0 +1,69 @@ +# kusion apply + +Apply the operational intent of various resources to multiple runtimes + +### Synopsis + +Apply a series of resource changes within the stack. + + Create, update or delete resources according to the operational intent within a stack. By default, Kusion will generate an execution plan and prompt for your approval before performing any actions. You can review the plan details and make a decision to proceed with the actions or abort them. 
+ +``` +kusion apply [flags] +``` + +### Examples + +``` + # Apply with specified work directory + kusion apply -w /path/to/workdir + + # Apply with specified arguments + kusion apply -D name=test -D age=18 + + # Apply with specified intent file + kusion apply --intent-file intent.yaml + + # Apply with specifying intent file + kusion apply --intent-file intent.yaml + + # Skip interactive approval of plan details before applying + kusion apply --yes + + # Apply without output style and color + kusion apply --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all plan details, combined use with flag --detail + -D, --argument stringToString Specify the top-level argument (default []) + -C, --backend-config strings backend-config config state storage backend + --backend-type string backend-type specify state storage backend + -d, --detail Automatically show plan details with interactive options + --dry-run Preview the execution effect (always successful) without actually applying the changes + -h, --help help for apply + --ignore-fields strings Ignore differences of target fields + --intent-file string Specify the intent file path as input, and the intent file must be located in the working directory or its subdirectories + --no-style no-style sets to RawOutput mode and disables all of styling + --operator string Specify the operator + -o, --output string Specify the output format + -Y, --setting strings Specify the command line setting files + --watch After creating/updating/deleting the requested object, watch for changes + -w, --workdir string Specify the work directory + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack + +###### Auto generated by spf13/cobra on 4-Jan-2024 \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-build.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-build.md new file mode 100644 index 00000000..435f3442 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-build.md @@ -0,0 +1,56 @@ +# kusion build + +Build Kusion modules in a Stack to the Intent + +### Synopsis + +Build Kusion modules in a Stack to the Intent + + The command must be executed in a Stack or by specifying a Stack directory with the -w flag. 
You can provide a list of arguments to replace the placeholders defined in KCL, and use the --output flag to output the built results to a file + +``` +kusion build [flags] +``` + +### Examples + +``` + # Build main.k with arguments + kusion build -D name=test -D age=18 + + # Build main.k with work directory + kusion build -w appops/demo/dev + + # Build configurations and write result into an output.yaml + kusion build -o output.yaml + + # Build configurations with arguments from settings.yaml + kusion build -Y settings.yaml + + # Build without output style and color + kusion build --no-style=true +``` + +### Options + +``` + -D, --argument stringToString Specify the top-level argument (default []) + -h, --help help for build + --no-style Disable the output style and color + -o, --output string Specify the output file + -Y, --setting strings Specify the command line setting files + -w, --workdir string Specify the work directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-compile.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-compile.md new file mode 100644 index 00000000..253bd5cb --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-compile.md @@ -0,0 +1,32 @@ +# kusion compile + +Deprecated: Use 'kusion build' to generate the Intent instead + +``` +kusion compile [flags] +``` + +### Examples + +``` + Deprecated +``` + +### Options + +``` + -h, --help help for compile +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-destroy.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-destroy.md new file mode 100644 index 00000000..890e9756 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-destroy.md @@ -0,0 +1,47 @@ +# kusion destroy + +Destroy resources within the stack. + +### Synopsis + +Destroy resources within the stack. + + Please note that the destroy command does NOT perform resource version checks. Therefore, if someone submits an update to a resource at the same time you execute a destroy command, their update will be lost along with the rest of the resource. 
+ +``` +kusion destroy [flags] +``` + +### Examples + +``` + # Delete resources of current stack + kusion destroy +``` + +### Options + +``` + -D, --argument stringToString Specify the top-level argument (default []) + -C, --backend-config strings backend-config config state storage backend + --backend-type string backend-type specify state storage backend + -d, --detail Automatically show plan details after previewing it + -h, --help help for destroy + --operator string Specify the operator + -Y, --setting strings Specify the command line setting files + -w, --workdir string Specify the work directory + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-init.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-init.md new file mode 100644 index 00000000..b9d5e1db --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-init.md @@ -0,0 +1,54 @@ +# kusion init + +Initialize the scaffolding for a project + +### Synopsis + +This command initializes the scaffolding for a project, generating a project from an appointed template with correct structure. + + The scaffold templates can be retrieved from local or online. The built-in templates are used by default, self-defined templates are also supported by assigning the template repository path. + +``` +kusion init +``` + +### Examples + +``` + # Initialize a project from internal templates + kusion init + + # Initialize a project from default online templates + kusion init --online=true + + # Initialize a project from a specific online template + kusion init https://github.com// --online=true + + # Initialize a project from a specific local template + kusion init /path/to/templates +``` + +### Options + +``` + --custom-params string Custom params in JSON. If specified, it will be used as the template default value and skip prompts + --force Force generating the scaffolding files, even if it would change the existing files + -h, --help help for init + --online Use templates from online repository to initialize project, or use locally cached templates + --project-name string Initialize with specified project name. If not specified, a prompt will request it + --template-name string Initialize with specified template. If not specified, a prompt will request it + --yes Skip prompts and proceed with default values +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-preview.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-preview.md new file mode 100644 index 00000000..90547749 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-preview.md @@ -0,0 +1,66 @@ +# kusion preview + +Preview a series of resource changes within the stack + +### Synopsis + +Preview a series of resource changes within the stack. + + Create, update or delete resources according to the intent described in the a stack. By default, Kusion will generate an execution plan and present it for your approval before taking any action. + +``` +kusion preview [flags] +``` + +### Examples + +``` + # Preview with specified work directory + kusion preview -w /path/to/workdir + + # Preview with specified arguments + kusion preview -D name=test -D age=18 + + # Preview with specified intent file + kusion preview --intent-file intent.yaml + + # Preview with ignored fields + kusion preview --ignore-fields="metadata.generation,metadata.managedFields + + # Preview with json format result + kusion preview -o json + + # Preview without output style and color + kusion preview --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all plan details, combined use with flag --detail + -D, --argument stringToString Specify the top-level argument (default []) + -C, --backend-config strings backend-config config state storage backend + --backend-type string backend-type specify state storage backend + -d, --detail Automatically show plan details with interactive options + -h, --help help for preview + --ignore-fields strings Ignore differences of target fields + --intent-file string Specify the intent file path as input, and the intent file must be located in the working directory or its subdirectories + --no-style no-style sets to RawOutput mode and disables all of styling + --operator string Specify the operator + -o, --output string Specify the output format + -Y, --setting strings Specify the command line setting files + -w, --workdir string Specify the work directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-version.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-version.md new file mode 100644 index 00000000..6f7d0445 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-version.md @@ -0,0 +1,38 @@ +# kusion version + +Print the Kusion version information for the current context + +### Synopsis + +Print the Kusion version information for the current context + +``` +kusion version [flags] +``` + +### Examples + +``` + # Print the Kusion version + kusion version +``` + +### Options + +``` + -h, --help help for version + -o, --output string Output format. Only json format is supported for now +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-create.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-create.md new file mode 100644 index 00000000..e723259b --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-create.md @@ -0,0 +1,38 @@ +# kusion workspace create + +Create a new workspace + +### Synopsis + +This command creates a workspace with specified name and configuration file, where the file must be in the YAML format. + +``` +kusion workspace create +``` + +### Examples + +``` + # Create a new workspace + kusion workspace create dev -f dev.yaml +``` + +### Options + +``` + -f, --file string the path of workspace configuration file + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-delete.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-delete.md new file mode 100644 index 00000000..25f63165 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-delete.md @@ -0,0 +1,37 @@ +# kusion workspace delete + +Delete a workspace + +### Synopsis + +This command deletes a specified workspace. + +``` +kusion workspace delete +``` + +### Examples + +``` + # Delete a workspace + kusion workspace delete dev +``` + +### Options + +``` + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-list.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-list.md new file mode 100644 index 00000000..01e377d7 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-list.md @@ -0,0 +1,37 @@ +# kusion workspace list + +List all workspace names + +### Synopsis + +This command list the names of all workspaces. + +``` +kusion workspace list +``` + +### Examples + +``` + # List all workspace names + kusion workspace list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-show.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-show.md new file mode 100644 index 00000000..8a478221 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-show.md @@ -0,0 +1,37 @@ +# kusion workspace show + +Show a workspace configuration + +### Synopsis + +This command gets a specified workspace configuration. + +``` +kusion workspace show +``` + +### Examples + +``` + # Show a workspace configuration + kusion workspace show dev +``` + +### Options + +``` + -h, --help help for show +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-update.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-update.md new file mode 100644 index 00000000..c2c38bd8 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace-update.md @@ -0,0 +1,38 @@ +# kusion workspace update + +Update a workspace configuration + +### Synopsis + +This command updates a workspace configuration with specified configuration file, where the file must be in the YAML format. + +``` +kusion workspace update +``` + +### Examples + +``` + # Update a workspace configuration + kusion workspace update dev -f dev.yaml +``` + +### Options + +``` + -f, --file string the path of workspace configuration file + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace.md b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace.md new file mode 100644 index 00000000..1c08af7f --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/1-commands/kusion-workspace.md @@ -0,0 +1,37 @@ +# kusion workspace + +Workspace is a logical concept representing a target that stacks will be deployed to + +### Synopsis + +Workspace is a logical concept representing a target that stacks will be deployed to. + + Workspace is managed by platform engineers, which contains a set of configurations that application developers do not want or should not concern, and is reused by multiple stacks belonging to different projects. + +``` +kusion workspace [flags] +``` + +### Options + +``` + -h, --help help for workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of KusionStack +* [kusion workspace create](kusion-workspace-create.md) - Create a new workspace +* [kusion workspace delete](kusion-workspace-delete.md) - Delete a workspace +* [kusion workspace list](kusion-workspace-list.md) - List all workspace names +* [kusion workspace show](kusion-workspace-show.md) - Show a workspace configuration +* [kusion workspace update](kusion-workspace-update.md) - Update a workspace configuration + +###### Auto generated by spf13/cobra on 4-Jan-2024 diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/_category_.json b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/_category_.json new file mode 100644 index 00000000..c247ad43 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Catalog Models" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/app-configuration.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/app-configuration.md new file mode 100644 index 00000000..e593fa82 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/app-configuration.md @@ -0,0 +1,56 @@ +# appconfiguration + +## Schema AppConfiguration + +AppConfiguration is a developer-centric definition that describes how to run an Application.
This application model builds upon a decade of experience at AntGroup running super large scale
internal developer platform, combined with best-of-breed ideas and practices from the community. + +### Attributes + +|Name and Description| Type |Default Value|Required| +|--------------------|--------------------------------------------------------------|-------------|--------| +|**workload**
Workload defines how to run your application code. Currently supported workload profiles
include Service and Job.| [workload.Service](workload/service#schema-service) \| [workload.Job](workload/job#schema-job)|Undefined|**required**| +|**opsRule**
OpsRule specifies a collection of rules that will be checked for Day-2 operations.| [trait.OpsRule](trait/opsrule#schema-opsrule) |Undefined|optional| +|**database**
Database describes a locally deployed or a cloud provider managed database instance for the workload.|{str: [mysql.MySQL](database/mysql#schema-mysql) \| [postgres.PostgreSQL](database/postgres#schema-postgresql)}|Undefined|optional| +|**monitoring**| [monitoring.Prometheus](monitoring/prometheus#schema-prometheus) |Undefined|optional| +|**labels**| {str: str} |Undefined|optional| +|**annotations**| {str: str} |Undefined|optional| +### Examples +```python +# Instantiate an App with a long-running service and its image is "nginx:v1" + +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.accessories.database as db +import catalog.models.schema.v1.accessories.monitoring as m +import catalog.models.schema.v1.accessories.trait as t + +appConfiguration = ac.AppConfiguration { + workload: wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + type: "CollaSet" + } + opsRule: t.OpsRule { + maxUnavailable: "30%" + } + database: db.Database { + type: "aws" + engine: "mysql" + version: "5.7" + instanceType: "db.t3.micro" + } + monitoring: m.Prometheus{ + interval: "30s" + timeout: "15s" + path: "/metrics" + port: "web" + scheme: "http" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/database/mysql.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/database/mysql.md new file mode 100644 index 00000000..661d7451 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/database/mysql.md @@ -0,0 +1,36 @@ +# mysql + +## Schema MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider
managed mysql database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type defines whether the mysql database is deployed locally or provided by
cloud vendor. |"local" \| "cloud"|Undefined|**required**| +|**version**
Version defines the mysql version to use. |str|Undefined|**required**| +### Examples +```python +Instantiate a local mysql database with version of 5.7. + +import catalog.models.schema.v1.accessories.mysql + +mysql: mysql.MySQL { + type: "local" + version: "5.7" +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `` is composed of two parts, one of which is the `key` of database declared in `AppConfiguration` and the other is the `suffix` declared in `workspace` configuration. Kusion will concatenate the database key and suffix, convert them to uppercase, and replace `-` with `_`. And the `` supported now includes `mysql` and `postgres`. diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/database/postgres.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/database/postgres.md new file mode 100644 index 00000000..995ff6c7 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/database/postgres.md @@ -0,0 +1,36 @@ +# postgres + +## Schema PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider
managed postgresql database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type defines whether the postgresql database is deployed locally or provided by
cloud vendor. |"local" \| "cloud"|Undefined|**required**| +|**version**
Version defines the PostgreSQL version to use. |str|Undefined|**required**| +### Examples +```python +# Instantiate a local postgresql database with image version of 14.0. + +import catalog.models.schema.v1.accessories.postgres + +postgres: postgres.PostgreSQL { + type: "local" + version: "14.0" +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `` is composed of two parts: one is the `key` of the database declared in `AppConfiguration`, and the other is the `suffix` declared in the `workspace` configuration. Kusion will concatenate the database key and suffix, convert them to uppercase, and replace `-` with `_`. And the `` supported now includes `mysql` and `postgres`. diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/common.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/common.md new file mode 100644 index 00000000..1f2df505 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/common.md @@ -0,0 +1,16 @@ +# common + +## Schema WorkloadBase + +WorkloadBase defines a set of attributes shared by different workload profiles, e.g. Service
and Job. You can inherit this Schema to reuse these common attributes. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**containers**
Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|{str: [container.Container](container/container.md#schema-container)}|Undefined|**required**| +|**secrets**|{str: [secret.Secret](secret/secret.md#schema-secret)}|Undefined|optional| +|**replicas**
Number of container replicas based on this configuration that should be run.|int|2|**required**| +|**labels**
Labels are key/value pairs that are attached to the workload.|{str: str}|Undefined|optional| +|**annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|{str: str}|Undefined|optional| + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/container.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/container.md new file mode 100644 index 00000000..bb9f9676 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/container.md @@ -0,0 +1,61 @@ +# container + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**image**
Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| +|**command**
Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|[str]|Undefined|optional| +|**args**
Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|[str]|Undefined|optional| +|**env**
List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|{str: str}|Undefined|optional| +|**workingDir**
The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|str|Undefined|optional| +|**resources**
Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|{str: str}|Undefined|optional| +|**files**
List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|{str: [container.FileSpec](#schema-filespec)}|Undefined|optional| +|**dirs**
Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|{str: str}|Undefined|optional| +|**livenessProbe**
LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|[p.Probe](probe/probe.md#schema-probe)|Undefined|optional| +|**readinessProbe**
ReadinessProbe indicates whether an application is available to handle requests.|[p.Probe](probe/probe.md#schema-probe)|Undefined|optional| +|**startupProbe**
StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|[p.Probe](probe/probe.md#schema-probe)|Undefined|optional| +|**lifecycle**
Lifecycle refers to actions that the management system should take in response to container lifecycle events.|[lc.Lifecycle](lifecycle/lifecycle.md#schema-lifecycle)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**content**
File content in plain text.|str|Undefined|optional| +|**contentFrom**
Source for the file content, reference to a secret or configmap value.|str|Undefined|optional| +|**mode**
Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/lifecycle/lifecycle.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/lifecycle/lifecycle.md new file mode 100644 index 00000000..88a769c5 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/lifecycle/lifecycle.md @@ -0,0 +1,28 @@ +# lifecycle + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**preStop**
The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](../probe/probe.md#schema-exec) \| [probe.Http](../probe/probe.md#schema-http)|Undefined|optional| +|**postStart**
The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](../probe/probe.md#schema-exec) \| [probe.Http](../probe/probe.md#schema-http)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/probe/probe.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/probe/probe.md new file mode 100644 index 00000000..ad392f55 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/container/probe/probe.md @@ -0,0 +1,88 @@ +# probe + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**probeHandler**
The action taken to determine whether a container is alive or healthy.|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http) \| [probe.Tcp](#schema-tcp)|Undefined|**required**| +|**initialDelaySeconds**
The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**timeoutSeconds**
The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**periodSeconds**
How often (in seconds) to perform the probe.|int|Undefined|optional| +|**successThreshold**
Minimum consecutive successes for the probe to be considered successful after having failed.|int|Undefined|optional| +|**failureThreshold**
Minimum consecutive failures for the probe to be considered failed after having succeeded.|int|Undefined|optional| +|**terminationGracePeriod**|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**command**
The command line to execute inside the container.|[str]|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to send HTTP requests.|str|Undefined|**required**| +|**headers**
Collection of custom headers to set in the request|{str: str}|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to open a socket.|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/network/port.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/network/port.md new file mode 100644 index 00000000..9da16051 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/network/port.md @@ -0,0 +1,27 @@ +# port + +## Schema Port + +Port defines the exposed port of the Service, which can be used to describe how the Service
get accessed. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**port**
The exposed port of the Service.|int|80|**required**| +|**targetPort**
The backend container port. If empty, set it the same as the port.|int|Undefined|optional| +|**protocol**
The protocol to access the port.|"TCP" \| "UDP"|"TCP"|**required**| +|**public**
Public defines whether the port can be accessed through Internet.|bool|False|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/secret/secret.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/secret/secret.md new file mode 100644 index 00000000..79071b38 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/internal/secret/secret.md @@ -0,0 +1,28 @@ +# secret + +## Schema Secret + +Secrets are used to provide data that is considered sensitive like passwords, API keys,
TLS certificates, tokens or other credentials. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type of secret, used to facilitate programmatic handling of secret data.|"basic" \| "token" \| "opaque" \| "certificate" \| "external"|Undefined|**required**| +|**params**
Collection of parameters used to facilitate programmatic handling of secret data.|{str: str}|Undefined|optional| +|**data**
Data contains the non-binary secret data in string form.|{str: str}|Undefined|optional| +|**immutable**
Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|bool|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/monitoring/prometheus.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/monitoring/prometheus.md new file mode 100644 index 00000000..72394e63 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/monitoring/prometheus.md @@ -0,0 +1,23 @@ +# prometheus + +## Schema Prometheus + +Prometheus can be used to define monitoring requirements + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**path**
The path to scrape metrics from.|str|/metrics|optional| +|**port**
The port to scrape metrics from. When using Prometheus operator, this needs to be the port NAME. Otherwise, this can be a port name or a number.|str|container ports when scraping pod (monitorType is pod) and service port when scraping service (monitorType is service)|optional| +### Examples +```python +import catalog.models.schema.v1.monitoring as m + +monitoring: m.Prometheus{ + path: "/metrics" + port: "web" +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/trait/opsrule.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/trait/opsrule.md new file mode 100644 index 00000000..42a22cde --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/trait/opsrule.md @@ -0,0 +1,21 @@ +# opsrule + +## Schema OpsRule + +OpsRule describes operation rules for various Day-2 Operations. Once declared, these
operation rules will be checked before any Day-2 operations. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**maxUnavailable**
The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|int \| str|"25%"|optional| +### Examples +```python +import catalog.models.schema.v1.trait as t + +opsRule : t.OpsRule { + maxUnavailable: "30%" +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/workload/job.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/workload/job.md new file mode 100644 index 00000000..67d550c8 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/workload/job.md @@ -0,0 +1,241 @@ +# job + +## Schemas +- [Job](#schema-job) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Job + +Job is a kind of workload profile that describes how to run your application code. This
is typically used for tasks that take from a few seconds to a few days to complete. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**containers**
Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|{str: [container.Container](#schema-container)}|Undefined|**required**| +|**schedule**|str|Undefined|**required**| +|**replicas**
Number of container replicas based on this configuration that should be run.|int|2|**required**| +|**secrets**|{str: [secret.Secret](#schema-secret)}|Undefined|optional| +|**labels**
Labels are key/value pairs that are attached to the workload.|{str: str}|Undefined|optional| +|**annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|{str: str}|Undefined|optional| +### Examples +```python +# Instantiate a job with the busybox image that runs every hour + +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c + +job: wl.Job { + containers: { + "busybox": c.Container{ + image: "busybox:1.28" + command: ["/bin/sh", "-c", "echo hello"] + } + } + schedule: "0 * * * *" +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**image**
Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| +|**command**
Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|[str]|Undefined|optional| +|**args**
Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|[str]|Undefined|optional| +|**env**
List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|{str: str}|Undefined|optional| +|**workingDir**
The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|str|Undefined|optional| +|**resources**
Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|{str: str}|Undefined|optional| +|**files**
List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|{str: [container.FileSpec](#schema-filespec)}|Undefined|optional| +|**dirs**
Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|{str: str}|Undefined|optional| +|**livenessProbe**
LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**readinessProbe**
ReadinessProbe indicates whether an application is available to handle requests.|[p.Probe](#schema-probe)|Undefined|optional| +|**startupProbe**
StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**lifecycle**
Lifecycle refers to actions that the management system should take in response to container lifecycle events.|[lc.Lifecycle](#schema-lifecycle)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**content**
File content in plain text.|str|Undefined|optional| +|**contentFrom**
Source for the file content, reference to a secret or configmap value.|str|Undefined|optional| +|**mode**
Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**probeHandler**
The action taken to determine whether a container is alive or healthy.|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http) \| [probe.Tcp](#schema-tcp)|Undefined|**required**| +|**initialDelaySeconds**
The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**timeoutSeconds**
The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**periodSeconds**
How often (in seconds) to perform the probe.|int|Undefined|optional| +|**successThreshold**
Minimum consecutive successes for the probe to be considered successful after having failed.|int|Undefined|optional| +|**failureThreshold**
Minimum consecutive failures for the probe to be considered failed after having succeeded.|int|Undefined|optional| +|**terminationGracePeriod**|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**command**
The command line to execute inside the container.|[str]|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to send HTTP requests.|str|Undefined|**required**| +|**headers**
Collection of custom headers to set in the request|{str: str}|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to open a socket.|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**preStop**
The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +|**postStart**
The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type of secret, used to facilitate programmatic handling of secret data.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#secret-types|"basic" \| "opaque"|opaque|**required**| +|**data**
Data contains the non-binary secret data in string form.|{str: str}|Undefined|optional| +|**immutable**
Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|bool|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/workload/service.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/workload/service.md new file mode 100644 index 00000000..dc19543e --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/1-catalog-models/workload/service.md @@ -0,0 +1,274 @@ +# service + +## Schemas +- [Service](#schema-service) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + - [Port](#schema-port) + +## Schema Service + +Service is a kind of workload profile that describes how to run your application code. This
is typically used for long-running web applications that should "never" go down, and handle
short-lived latency-sensitive web requests, or events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**containers**
Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|{str: [container.Container](#schema-container)}|Undefined|**required**| +|**replicas**
Number of container replicas based on this configuration that should be run.|int|2|**required**| +|**ports**
The list of ports that the Service should expose.|[[network.Port](#schema-port)]|Undefined|optional| +|**secrets**|{str: [secret.Secret](#schema-secret)}|Undefined|optional| +|**labels**
Labels are key/value pairs that are attached to the workload.|{str: str}|Undefined|optional| +|**annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|{str: str}|Undefined|optional| +|**type**
Type represents the type of workload used by this Service. Currently, it supports several
types, including Deployment and CollaSet.|"Deployment" \| "CollaSet"|Deployment|optional| +### Examples +```python +# Instantiate a long-running service whose image is "nginx:v1" + +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.network as n + +svc = wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + ports: [ + n.Port { + port: 80 + public: True + } + n.Port { + port: 9090 + } + ] +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter, 1 or more containers can be created from each template. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**image**
Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| +|**command**
Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile; otherwise, the Docker
image's ENTRYPOINT is used if this is not provided.|[str]|Undefined|optional| +|**args**
Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile; otherwise, the Docker
image's CMD is used if this is not provided.|[str]|Undefined|optional| +|**env**
List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|{str: str}|Undefined|optional| +|**workingDir**
The working directory of the running process defined in entrypoint.
The container runtime's default will be used if this is not specified.|str|Undefined|optional| +|**resources**
Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|{str: str}|Undefined|optional| +|**files**
List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|{str: [container.FileSpec](#schema-filespec)}|Undefined|optional| +|**dirs**
Collection of volumes to mount into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|{str: str}|Undefined|optional| +|**livenessProbe**
LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**readinessProbe**
ReadinessProbe indicates whether an application is available to handle requests.|[p.Probe](#schema-probe)|Undefined|optional| +|**startupProbe**
StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**lifecycle**
Lifecycle refers to actions that the management system should take in response to container lifecycle events.|[lc.Lifecycle](#schema-lifecycle)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**mode**
Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|str|Undefined|**required**| +|**content**
File content in plain text.|str|Undefined|optional| +|**contentFrom**
Source for the file content, reference to a secret or configmap value.|str|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**probeHandler**
The action taken to determine whether a container is alive or healthy.|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http) \| [probe.Tcp](#schema-tcp)|Undefined|**required**| +|**initialDelaySeconds**
The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**timeoutSeconds**
The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**periodSeconds**
How often (in seconds) to perform the probe.|int|Undefined|optional| +|**successThreshold**
Minimum consecutive successes for the probe to be considered successful after having failed.|int|Undefined|optional| +|**failureThreshold**
Minimum consecutive failures for the probe to be considered failed after having succeeded.|int|Undefined|optional| +|**terminationGracePeriod**|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**command**
The command line to execute inside the container.|[str]|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to send HTTP requests to.|str|Undefined|**required**| +|**headers**
Collection of custom headers to set in the request|{str: str}|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL used to open a socket.|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**preStop**
The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +|**postStart**
The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type of secret, used to facilitate programmatic handling of secret data.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#secret-types|"basic" \| "opaque"|opaque|**required**| +|**data**
Data contains the non-binary secret data in string form.|{str: str}|Undefined|optional| +|**immutable**
Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|bool|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + +## Schema Port + +Port defines the exposed port of Service, which can be used to describe how the Service
gets accessed. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**port**
The exposed port of the Service.|int|80|**required**| +|**protocol**
The protocol to access the port.|"TCP" \| "UDP"|"TCP"|optional| +|**public**
Public defines whether the port can be accessed through the Internet.|bool|False|optional| +|**targetPort**
The backend container port. If empty, set it the same as the port.|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/_category_.json b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/_category_.json new file mode 100644 index 00000000..81444988 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Workspace Configs" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/database/mysql.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/database/mysql.md new file mode 100644 index 00000000..bb79f2fb --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/database/mysql.md @@ -0,0 +1,66 @@ +# mysql + +## Module MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider managed mysql database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the mysql database. |str|"root"|optional| +|**category**
Category specifies the edition of the mysql instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the mysql instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the workload connects to the cloud mysql instance through the cloud vendor's private network or the public network. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the mysql instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud mysql instance will be created in. |str|Undefined|optional| +|**suffix**
Suffix specifies the suffix of the database name. |str|Undefined|optional| + +### Examples + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 + +# MySQL workspace configs for AWS RDS +modules: + mysql: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + suffix: "-mysql" +``` + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing + +# MySQL workspace configs for Alicloud RDS +modules: + mysql: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + suffix: "-mysql" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/database/postgres.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/database/postgres.md new file mode 100644 index 00000000..ad4118e8 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/database/postgres.md @@ -0,0 +1,69 @@ +# postgres + +## Module PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider managed postgres database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the postgres database. |str|"root"|optional| +|**category**
Category specifies the edition of the postgres instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the postgres instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the workload connects to the cloud postgres instance through the cloud vendor's private network or the public network. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the postgres instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud postgres instance will be created in. |str|Undefined|optional| +|**suffix**
Suffix specifies the suffix of the database name. |str|Undefined|optional| + +### Examples + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 + +# PostgreSQL workspace configs for AWS RDS +modules: + postgres: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing + +# PostgreSQL workspace configs for Alicloud RDS +modules: + postgres: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md new file mode 100644 index 00000000..f9c1754b --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md @@ -0,0 +1,40 @@ +# monitoring + +`monitoring` can be used to define workspace-level monitoring configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**operatorMode**
Whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages.|true \| false|false|optional| +|**monitorType**
The kind of monitor to create. It only applies when operatorMode is set to True.|"Service" \| "Pod"|"Service"|optional| +|**interval**
The time interval at which Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping interval can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|30s|optional| +|**timeout**
The timeout when Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping timeout can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|15s|optional| +|**scheme**
The scheme to scrape metrics from. Possible values are http and https.|"http" \| "https"|http|optional| + +### Examples +```yaml +modules: + monitoring: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + timeout: 1m + projectSelector: + - foo + - bar + high_frequency: + monitorType: Service + interval: 10s + timeout: 5s + projectSelector: + - helloworld + - wordpress + - prometheus-sample-app +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/networking/port.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/networking/port.md new file mode 100644 index 00000000..fe84db36 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/networking/port.md @@ -0,0 +1,24 @@ +# port + +`port` can be used to define workspace-level networking configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
The specific cloud vendor that provides the load balancer.| "alicloud" \| "aws"|Undefined|**required**| +| **labels**
The attached labels of the port.|{str:str}|Undefined|optional| +| **annotations**
The attached annotations of the port.|{str:str}|Undefined|optional| + +### Examples + +```yaml +modules: + port: + default: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/trait/opsrule.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/trait/opsrule.md new file mode 100644 index 00000000..d42c6244 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/trait/opsrule.md @@ -0,0 +1,19 @@ +# opsrule + +`opsrule` can be used to define workspace-level operational rule configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**maxUnavailable**
The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|int \| str|Undefined|optional| + + +### Examples + +```yaml +modules: + opsRule: + default: + maxUnavailable: "40%" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/workload/job.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/workload/job.md new file mode 100644 index 00000000..8004cb83 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/workload/job.md @@ -0,0 +1,23 @@ +# job + +`job` can be used to define workspace-level job configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | + +### Examples +```yaml +modules: + job: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/workload/service.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/workload/service.md new file mode 100644 index 00000000..fbefe8b8 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/2-workspace-configs/workload/service.md @@ -0,0 +1,25 @@ +# service + +`service` can be used to define workspace-level service configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | +| **type**
Type represents the type of workload used by this Service. Currently, it supports several
types, including Deployment and CollaSet. |"Deployment" \| "CollaSet"| Deployment |**required**| + +### Examples +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/3-naming-conventions.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/3-naming-conventions.md new file mode 100644 index 00000000..ab7f668c --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/3-naming-conventions.md @@ -0,0 +1,34 @@ +--- +id: naming-conventions +sidebar_label: Resource Naming Conventions +--- + +# Resource Naming Conventions + +Kusion will automatically create Kubernetes or Terraform resources for the applications, many of which do not require users' awareness. This document will introduce the naming conventions for these related resources. + +## Kubernetes Resources + +Kusion adheres to specific rules when generating the Kubernetes resources for users' applications. The table below lists some common Kubernetes resource naming conventions. Note that `Namespace` can now be specified by users. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| Namespace | `` | v1:Namespace:wordpress-local-db | +| Deployment | ``-``-`` | apps/v1:Deployment:wordpress-local-db:wordpress-local-db-dev-wordpress | +| CronJob | ``-``-`` | batch/v1:CronJob:helloworld:helloworld-dev-helloworld | +| Service | ``-``-``-` or ` | v1:Service:helloworld:helloworld-dev-helloworld-public | + +## Terraform Resources + +Similarly, Kusion also adheres to specific naming conventions when generating the Terraform Resources. Some common resources are listed below. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| random_password | ``-`` | hashicorp:random:random_password:wordpress-db-mysql | +| aws_security_group | ``-`` | hashicorp:aws:aws_security_group:wordpress-db-mysql | +| aws_db_instance | `` | hashicorp:aws:aws_db_instance:wordpress-db | +| alicloud_db_instance | `` | aliyun:alicloud:alicloud_db_instance:wordpress-db | +| alicloud_db_connection | `` | aliyun:alicloud:alicloud_db_connection:wordpress | +| alicloud_rds_account | `` | aliyun:alicloud:alicloud_rds_account:wordpress | + +The `` is composed of two parts, one of which is the `key` of database declared in `AppConfiguration` and the other is the `suffix` declared in `workspace` configuration. Kusion will concatenate the database key and suffix, convert them to uppercase, and replace `-` with `_`. And the `` supported now includes `mysql` and `postgres`. diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/_category_.json b/docs_versioned_docs/version-v0.10/6-reference/2-modules/_category_.json new file mode 100644 index 00000000..4dadaa75 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Modules" +} diff --git a/docs_versioned_docs/version-v0.10/6-reference/2-modules/index.md b/docs_versioned_docs/version-v0.10/6-reference/2-modules/index.md new file mode 100644 index 00000000..cb0d24a9 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/2-modules/index.md @@ -0,0 +1,45 @@ +# Kusion Modules + +KusionStack presets application configuration models described by KCL, where the model is called **Kusion Model**. 
The GitHub repository [KusionStack/catalog](https://github.com/KusionStack/catalog) is used to store these models, which is known as **Kusion Model Library**. + +The original intention of designing Kusion Model is to enhance the efficiency and improve the experience of YAML users. Through the unified application model defined by code, abstract and encapsulate complex configuration items, omit repetitive and derivable configurations, and supplement with necessary verification logic. Only the necessary attributes get exposed, users get an out-of-the-box, easy-to-understand configuration interface, which reduces the difficulty and improves the reliability of the configuration work. + +Kusion Model Library currently provides the Kusion Model `AppConfiguration`. The design of `AppConfiguration` is developer-centric, based on Ant Group's decades of practice in building and managing hyperscale IDP (Internal Developer Platform), and the best practice of community. `AppConfiguration` describes the full lifecycle of an application. + +A simple example of using `AppConfiguration` to describe an application is as follows: + +```bash +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + "wordpress": c.Container { + image: "wordpress:latest" + env: { + "WORDPRESS_DB_HOST": "secret://wordpress-db/hostAddress" + "WORDPRESS_DB_PASSWORD": "secret://wordpress-db/password" + } + resources: { + "cpu": "1" + "memory": "2Gi" + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "5.7" + size: 20 + instanceType: "mysql.n2.serverless.1c" + category: "serverless_basic" + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/6-reference/3-roadmap.md b/docs_versioned_docs/version-v0.10/6-reference/3-roadmap.md new file mode 100644 index 00000000..7a1e4565 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/3-roadmap.md @@ -0,0 +1,15 @@ +# Roadmap + +For a finer-grained view into our roadmap and what is being worked on for a release, please refer to the [GitHub Issue Tracker](https://github.com/KusionStack/kusion/issues) + +## Resource Ecosystem +We plan to expand the range of resource types that our platform can handle. This includes not only traditional cloud IaaS resources, but also popular cloud-native products such as Prometheus, istio and Argo. By supporting a wider variety of resources, we aim to address the heterogeneous needs of modern applications and allow users to harness the full power of the cloud-native technologies. + +## App Progressive Rollout +One of the key challenges in delivering applications at scale is to balance the need for speed with the need for reliability. To help our users achieve this balance, we will introduce progressive rollout strategies, such as canary release, rolling release, and percentage release. These techniques enable users to test new features or versions on a small subset of their users or infrastructure before rolling them out to the entire system. By doing so, users can minimize the risk of downtime or errors caused by untested changes. + +## Custom Pipelines +Thie current workflow of KusionStack is `write`,`preview` and `apply`, but to handle more complex deployments we need to empower users to customize the deployment pipelines to fit their specific workflows and requirements. This includes the ability to define custom stages, add or remove steps, and integrate with third-party tools. 
With customizable pipelines, users can streamline their deployment process, automate repetitive tasks, and satisfy their own needs by themselves. + +## Runtime Plugin +We have already supported IaaS cloud resources and Kubernetes resources, but we need a more flexible mechanism to support a broader range of on-premise infrastructure. By supporting a diverse set of infrastructures, we can help users avoid vendor lock-in, optimize their resource usage, and scale their applications across different regions and geographies. diff --git a/docs_versioned_docs/version-v0.10/6-reference/_category_.json b/docs_versioned_docs/version-v0.10/6-reference/_category_.json new file mode 100644 index 00000000..a3b4dd92 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/6-reference/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Reference" +} diff --git a/docs_versioned_docs/version-v0.10/7-faq/1-install-error.md b/docs_versioned_docs/version-v0.10/7-faq/1-install-error.md new file mode 100644 index 00000000..a0fde76a --- /dev/null +++ b/docs_versioned_docs/version-v0.10/7-faq/1-install-error.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +--- + +# Installation + +## 1. Could not find `libintl.dylib` + +This problem is that some tools depends on the `Gettext` library, but macOS does not have this library by default. You can try to solve it in the following ways: + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. `brew install gettext` +3. Make sure `libintl.8.dylib` exists in `/usr/local/opt/gettext/lib` directory +4. If brew is installed in another directory, the library can be created by copying it to the corresponding directory + +## 2. macOS system SSL related errors + +Openssl dylib library not found or SSL module is not available problem + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. Install openssl (version 1.1) via brew + +``` +brew install openssl@1.1 +``` diff --git a/docs_versioned_docs/version-v0.10/7-faq/2-kcl.md b/docs_versioned_docs/version-v0.10/7-faq/2-kcl.md new file mode 100644 index 00000000..596aa881 --- /dev/null +++ b/docs_versioned_docs/version-v0.10/7-faq/2-kcl.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 2 +--- + +# KCL + +Visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl) for more documents. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.10/7-faq/_category_.json b/docs_versioned_docs/version-v0.10/7-faq/_category_.json new file mode 100644 index 00000000..7c4b229f --- /dev/null +++ b/docs_versioned_docs/version-v0.10/7-faq/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "FAQ" +} diff --git a/docs_versioned_docs/version-v0.11/1-what-is-kusion/1-overview.md b/docs_versioned_docs/version-v0.11/1-what-is-kusion/1-overview.md new file mode 100644 index 00000000..0601be5b --- /dev/null +++ b/docs_versioned_docs/version-v0.11/1-what-is-kusion/1-overview.md @@ -0,0 +1,49 @@ +--- +id: overview +title: Overview +slug: / +--- + +# Overview + +Welcome to Kusion! This introduction section covers what Kusion is, the problem Kusion aims to solve, and how Kusion compares to other software. If you just want to dive into using Kusion, feel free to skip ahead to the [Getting Started](getting-started/install-kusion) section. + +## What is Kusion? +Kusion is an intent-based Platform Orchestrator that enables developers to specify their desired intent in a declarative way and then using a consistent workflow to drive continuous deployment through application lifecycle. Inspired by the phrase **Fusion on Kubernetes**, Kusion aims to help application and platform developers to develop and deliver in a self-serviceable, fast, reliable, and collaborative way. + +![arch](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/workflow.png) + + +## Why Kusion? + +Developers should be able to deploy and run their applications and services end to end. **"You build it, you run it", the original promise of DevOps.** + +But the modern day for most software organizations this promise quickly become unrelalistic since the increasingly complex cloud-native toolchains, while cloud native technologies made huge improvements in areas such as scalability, availability and operability, it also brings downside - the growing burden on developers, which leads to the rise of [Platform Engineering](https://platformengineering.org/). + +Another challenge we saw is that a series of [antipatterns](https://web.devopstopologies.com/#anti-types) emerge when regular software organizations tries to implement true DevOps. Without well proven reference architecture and supporting tools, it's much more difficult to accomplish the original promise. + +On one hand, **Kusion was build to minimize developer's cognitive load**. With application-centric configuration model, you don't need to deal with tedious infrastructure and configuration management tooling, all you need to be familiar with is [AppConfiguration](configuration-walkthrough/overview). This approach shields developers from the configurational complexity of Kubernetes but still enable standardization by design. + +On the other hand, **Kusion defines a new way for different engineering organizations to collaborate**. With the separation of concerns, different roles could focus on their aspects of the configuration based on their knowledge and responsibility, whereas Kusion will dynamically manage and "glue" the opinionated configurations together. Through such a division of labor, the platform team can better manage the differences and complexities of the platform, and app developers could participate in ops work with much less cognitive load. 
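To make this concrete, below is a minimal sketch of what that developer-facing interface can look like — a hypothetical `AppConfiguration` written in KCL. The application name and image are illustrative placeholders; the imports follow the convention used later in the Getting Started guide, which walks through a complete, working example.

```python
# A minimal, hypothetical developer-facing configuration.
# The application name "myapp" and the image are placeholders;
# see the Getting Started guide for a complete, working example.
import kam.v1.app_configuration as ac
import kam.v1.workload as wl
import kam.v1.workload.container as c

myapp: ac.AppConfiguration {
    workload: wl.Service {
        containers: {
            myapp: c.Container {
                image: "example/myapp:latest"
            }
        }
    }
}
```

Platform-owned concerns, such as replica defaults or the target environment, are layered in separately through workspace configurations rather than being written here.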
+ +## Kusion Highlights + +* **Platform as Code** + + Specify desired application intent through declarative configuration code, drive continuous deployment with any CI/CD systems or GitOps to match that intent. No ad-hoc scripts, no hard maintain custom workflows, just declarative configuration code. + +* **Dynamic Configuration Management** + + Enable platform teams to set baseline-templates, control how and where to deploy application workloads and provision accessory resources. While still enabling application developers freedom via workload-centric specification and deployment. + +* **Security & Compliance Built In** + + Enforce security and infrastructure best practices with out-of-box [base models](https://github.com/KusionStack/catalog), create security and compliance guardrails for any Kusion deploy with third-party Policy as Code tools. All accessory resource secrets are automatically injected into Workloads. + +* **Lightweight and Open Model Ecosystem** + + Pure client-side solution ensures good portability and the rich APIs make it easier to integrate and automate. Large growing model ecosystem covers all stages in application lifecycle, with extensive connections to various infrastructure capabilities. + +:::tip + +**Kusion is an early project.** The end goal of Kusion is to boost [Internal Developer Platform](https://internaldeveloperplatform.org/) revolution, and we are iterating on Kusion quickly to strive towards this goal. For any help or feedback, please contact us in [Slack](https://github.com/KusionStack/community/discussions/categories/meeting) or [issues](https://github.com/KusionStack/kusion/issues). diff --git a/docs_versioned_docs/version-v0.11/1-what-is-kusion/2-kusion-vs-x.md b/docs_versioned_docs/version-v0.11/1-what-is-kusion/2-kusion-vs-x.md new file mode 100644 index 00000000..a5ed333d --- /dev/null +++ b/docs_versioned_docs/version-v0.11/1-what-is-kusion/2-kusion-vs-x.md @@ -0,0 +1,37 @@ +--- +id: kusion-vs-x +--- + +# Kusion vs Other Software + +It can be difficult to understand how different software compare to each other. Is one a replacement for the other? Are they complementary? etc. In this section, we compare Kusion to other software. + +**vs. GitOps (ArgoCD, FluxCD, etc.)** + +According to the [open GitOps principles](https://opengitops.dev/), GitOps systems typically have its desired state expressed declaratively, continuously observe actual system state and attempt to apply the desired state. In the design of Kusion toolchain, we refer to those principles but have no intention to reinvent any GitOps systems wheel. + +Kusion adopts your GitOps process and improves it with richness of features. The declarative [AppConfiguration](../concepts/app-configuration) model can be used to express desired intent, once intent is declared [Kusion CLI](../reference/commands) takes the role to make production match intent as safely as possible. + +**vs. PaaS (Heroku, Vercel, etc.)** + +Kusion shares the same goal with traditional PaaS platforms to provide application delivery and management capabilities. The intuitive difference from the full functionality PaaS platforms is that Kusion is a client-side toolchain, not a complete PaaS platform. + +Also traditional PaaS platforms typically constrain the type of applications they can run but there is no such constrain for Kusion which means Kusion provides greater flexibility. + +Kusion allows you to have platform-like features without the constraints of a traditional PaaS. 
However, Kusion is not attempting to replace any PaaS platforms, instead Kusion can be used to deploy to a platform such as Heroku. + +**vs. KubeVela** + +KubeVela is a modern software delivery and management control plane which makes it easier to deploy and operate applications on top of Kubernetes. + +Although some might initially perceive an overlap between Kusion and KubeVela, they are in fact complementary and can be integrated to work together. As a lightweight, purely client-side tool, coupled with corresponding [Generator](https://github.com/KusionStack/kusion-module-framework) implementation, Kusion can render [AppConfiguration](../concepts/app-configuration) schema to generate CRD resources for KubeVela and leverage KubeVela's control plane to implement application delivery. + +**vs. Helm** + +The concept of Helm originates from the [package management](https://en.wikipedia.org/wiki/Package_manager) mechanism of the operating system. It is a package management tool based on templated YAML files and supports the execution and management of resources in the package. + +Kusion is not a package manager. Kusion naturally provides a superset of Helm capabilities with the modeled key-value pairs, so that developers can use Kusion directly as a programable alternative to avoid the pain of writing text templates. For users who have adopted Helm, the stack compilation results in Kusion can be packaged and used in Helm format. + +**vs. Kubernetes** + +Kubernetes(K8s) is a container scheduling and management runtime widely used around the world, an "operating system" core for containers, and a platform for building platforms. Above the Kubernetes API layer, Kusion aims to provide app-centric **abstraction** and unified **workspace**, better **user experience** and automation **workflow**, and helps developers build the app delivery model easily and collaboratively. diff --git a/docs_versioned_docs/version-v0.11/1-what-is-kusion/_category_.json b/docs_versioned_docs/version-v0.11/1-what-is-kusion/_category_.json new file mode 100644 index 00000000..0817eb90 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/1-what-is-kusion/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "What is Kusion?" +} diff --git a/docs_versioned_docs/version-v0.11/2-getting-started/1-install-kusion.md b/docs_versioned_docs/version-v0.11/2-getting-started/1-install-kusion.md new file mode 100644 index 00000000..540881d6 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/2-getting-started/1-install-kusion.md @@ -0,0 +1,144 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Install Kusion + +You can install the latest Kusion CLI on MacOS, Linux and Windows. + +## MacOs/Linux + +For the MacOs and Linux, Homebrew and sh script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on MacOS and Linux is to use the brew package manager. 
+ +**Install Kusion** + +```bash +# tap formula repository Kusionstack/tap +brew tap KusionStack/tap + +# install Kusion +brew install KusionStack/tap/kusion +``` + +**Update Kusion** + +```bash +# update formulae from remote +brew update + +# update Kusion +brew upgrade KusionStack/tap/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/tap/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +curl https://www.kusionstack.io/scripts/install.sh | sh +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, 0.10.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.11.0 +curl https://www.kusionstack.io/scripts/install.sh | sh -s 0.11.0 +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +curl https://www.kusionstack.io/scripts/uninstall.sh | sh +``` + +```mdx-code-block + + +``` + +## Windows + +For the Windows, Scoop and Powershell script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on Windows is to use the scoop package manager. + +**Install Kusion** + +```bash +# add scoop bucket KusionStack +scoop bucket add KusionStack https://github.com/KusionStack/scoop-bucket.git + +# install kusion +scoop install KusionStack/kusion +``` + +**Update Kusion** + +```bash +# update manifest from remote +scoop update + +# update Kusion +scoop install KusionStack/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/install.ps1 | iex" +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.10.0 +powershell {"& { $(irm https://www.kusionstack.io/scripts/install.ps1) } -Version 0.11.0" | iex} +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/uninstall.ps1 | iex" +``` + +```mdx-code-block + + +``` diff --git a/docs_versioned_docs/version-v0.11/2-getting-started/2-deliver-quickstart.md b/docs_versioned_docs/version-v0.11/2-getting-started/2-deliver-quickstart.md new file mode 100644 index 00000000..c6155764 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/2-getting-started/2-deliver-quickstart.md @@ -0,0 +1,215 @@ +--- +id: deliver-quickstart +--- + +# Run Your First App on Kubernetes with Kusion + +In this tutorial, we will walk through how to deploy a quickstart application on Kubernetes with Kusion. The demo application can interact with a locally deployed MySQL database, which is declared as an accessory in the config codes and will be automatically created and managed by Kusion. 
+ +## Prerequisites + +Before we start to play with this example, we need to have the Kusion CLI installed and run an accessible Kubernetes cluster. Here are some helpful documents: + +- Install [Kusion CLI](./1-install-kusion.md). +- Run a [Kubernetes](https://kubernetes.io) cluster. Some light and convenient options for Kubernetes local deployment include [k3s](https://docs.k3s.io/quick-start), [k3d](https://k3d.io/v5.4.4/#installation), and [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node). + +## Initialize Project + +We can start by initializing this tutorial project with `kusion init` cmd. + +```shell +# Create a new directory and navigate into it. +mkdir quickstart && cd quickstart + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── default +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the project and stack structure can be found in [Project](../3-concepts/1-project/1-overview.md) and [Stack](../3-concepts/2-stack/1-overview.md). +::: + +### Review Configuration Files + +Now let's have a glance at the configuration codes of `default` stack: + +```shell +cat default/main.k +``` + +```python +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n + +# main.k declares the customized configuration codes for default stack. +quickstart: ac.AppConfiguration { + workload: wl.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + } +} +``` + +The configuration file `main.k`, usually written by the **App Developers**, declares the customized configuration codes for `default` stack, including an `AppConfiguration` instance with the name of `quickstart`. The `quickstart` application consists of a `Workload` with the type of `kam.v1.workload.Service`, which runs a container named `quickstart` using the image of `kusionstack/kusion-quickstart:latest`. + +Besides, it declares a **Kusion Module** with the type of `network.Network`, exposing `8080` port to be accessed for the long-running service. + +The `AppConfiguration` model can hide the major complexity of Kubernetes resources such as `Namespace`, `Deployment`, and `Service` which will be created and managed by Kusion, providing the concepts that are **application-centric** and **infrastructure-agnostic** for a more developer-friendly experience. + +:::info +More details about the `AppConfiguration` model and built-in Kusion Module can be found in [kam](https://github.com/KusionStack/kam) and [catalog](https://github.com/KusionStack/catalog). +::: + +The declaration of the dependency packages can be found in `default/kcl.mod`: + +```shell +cat default/kcl.mod +``` + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.1.0" } +``` + +:::info +More details about the application model and module dependency declaration can be found in [Kusion Module guide for app dev](../3-concepts/3-kusion-module/3-app-dev-guide.md). 
+::: + +## Application Delivery + +Use the following command to deliver the quickstart application in `default` stack on your accessible Kubernetes cluster, while watching the resource creation and automatically port-forwarding the specified port (8080) from local to the Kubernetes Service of the application. We can check the details of the resource preview results before we confirm to apply the diffs. + +```shell +cd default && kusion apply --watch --port-forward 8080 +``` + +![](/img/docs/user_docs/getting-started/kusion_apply_quickstart.gif) + +:::info +During the first apply, the models and modules that the application depends on will be downloaded, so it may take some time (usually within one minute). You can take a break and have a cup of coffee. +::: + +:::info +Kusion by default will create the Kubernetes resources of the application in the namespace the same as the project name. If you want to customize the namespace, please refer to **[T.B.D]**. +::: + +Now we can visit [http://localhost:8080](http://localhost:8080) in our browser and play with the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_page.png) + +## Add MySQL Accessory + +As you can see, the demo application page indicates that the MySQL database is not ready yet. Hence, we will now add a MySQL database as an accessory for the workload. + +We can add the Kusion-provided built-in dependency in the `default/kcl.mod`, so that we can use the `MySQL` module in the configuration codes. + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.1.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.1.0" } +``` + +We can update the `default/main.k` with the following configuration codes: + +```python +# The configuration codes in the perspective of developers. +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n +import mysql + +quickstart: ac.AppConfiguration { + workload: wl.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + env: { + "DB_HOST": "$(KUSION_DB_HOST_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_USERNAME": "$(KUSION_DB_USERNAME_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_PASSWORD": "$(KUSION_DB_PASSWORD_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + } + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +The configuration codes above declare a local `mysql.MySQL` with the engine version of `8.0` as an accessory for the application workload. The necessary Kubernetes resources for deploying and using the local MySQL database will be generated and users can get the `host`, `username` and `password` of the database through the [MySQL Credentials And Connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) of Kusion in application containers. + +:::info +For more information about the naming convention of Kusion built-in MySQL module, you can refer to [Module Naming Convention](../6-reference/2-modules/3-naming-conventions.md). +::: + +After that, we can re-apply the application: + +```shell +kusion apply --watch --port-forward 8080 +``` + +![](/img/docs/user_docs/getting-started/kusion_re_apply_quickstart.gif) + +:::info +You may wait another minute to download the MySQL Module. 
+::: + +Let's visit [http://localhost:8080](http://localhost:8080) in our browser, and we can find that the application has successfully connected to the MySQL database. The connection information is also printed on the page. + +![](/img/docs/user_docs/getting-started/quickstart_page_with_mysql.png) + +Now please feel free to enjoy the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_mysql_validation.gif) + +## Delete Application + +We can delete the quickstart demo workload and related accessory resources with the following cmd: + +```shell +kusion destroy --yes +``` + +![](/img/docs/user_docs/getting-started/kusion_destroy_quickstart.gif) diff --git a/docs_versioned_docs/version-v0.11/2-getting-started/_category_.json b/docs_versioned_docs/version-v0.11/2-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/2-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/docs_versioned_docs/version-v0.11/3-concepts/1-project/1-overview.md b/docs_versioned_docs/version-v0.11/3-concepts/1-project/1-overview.md new file mode 100644 index 00000000..edcc84d7 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/1-project/1-overview.md @@ -0,0 +1,12 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview + +A project in Kusion is defined as a folder that contains a `project.yaml` file and is generally recommended to be linked to a Git repository. Typically, the mapping between a project and a repository is 1:1, however, it is possible to have multiple projects connected to a single repository — for example, in the case of a monorepo. A project consists of one or more applications. + +The purpose of the project is to bundle application configurations there are relevant. Specifically, it organizes logical configurations for internal components to orchestrate the application and assembles these configurations to suit different roles, such as developers and SREs, thereby covering the entire lifecycle of application development. + +From the perspective of the application development lifecycle, the configurations delineated by the project is decoupled from the application code. It takes an immutable image as input, allowing users to perform operations and maintain the application within an independent configuration codebase. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/3-concepts/1-project/2-configuration.md b/docs_versioned_docs/version-v0.11/3-concepts/1-project/2-configuration.md new file mode 100644 index 00000000..101ac77a --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/1-project/2-configuration.md @@ -0,0 +1,29 @@ +--- +id: configuration +sidebar_label: Project file reference +--- + +# Kusion project file reference + +Every Kusion project has a project file, `project.yaml`, which specifies metadata about your project, such as the project name and project description. The project file must begin with lowercase `project` and have an extension of either `.yaml` or `.yml`. + +## Attributes + +| Name | Required | Description | Options | +|:------------- |:--------------- |:------------- |:------------- | +| `name` | required | Name of the project containing alphanumeric characters, hyphens, underscores. | None | +| `description` | optional | A brief description of the project. | None | +| `extensions` | optional | List of extensions on the project. 
| [See blow](#extensions) | + +### Extensions + +Extensions allow you to customize how resources are generated or customized as part of release. + +#### kubernetesNamespace + +The Kubernetes namespace extension allows you to customize namespace within your application generate Kubernetes resources. + +| Key | Required | Description | Example | +|:------|:--------:|:-------------|:---------| +| kind | y | The kind of extension being used. Must be 'kubernetesNamespace' | `kubernetesNamespace` | +| namespace | y | The namespace where all application-scoped resources generate Kubernetes objects. | `default` | diff --git a/docs_versioned_docs/version-v0.11/3-concepts/1-project/_category_.json b/docs_versioned_docs/version-v0.11/3-concepts/1-project/_category_.json new file mode 100644 index 00000000..3ca65e52 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/1-project/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Project" +} diff --git a/docs_versioned_docs/version-v0.11/3-concepts/2-stack/1-overview.md b/docs_versioned_docs/version-v0.11/3-concepts/2-stack/1-overview.md new file mode 100644 index 00000000..c6dcd2b5 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/2-stack/1-overview.md @@ -0,0 +1,16 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview + +A stack in Kusion is defined as a folder within the project directory that contains a `stack.yaml` file. Stacks provide a mechanism to isolate multiple sets of different configurations in the same project. It is also the smallest unit of operation that can be configured and deployed independently. + +The most common way to leverage stacks is to denote different phases of the software development lifecycle, such as `development`, `staging`, `production`, etc. For instance, in the case where the image and resource requirements for an application workload might be different across different phases in the SDLC, they can be represented by different stacks in the same project, namely `dev`, `stage` and `prod`. + +To distinguish this from the deploy-time concept of a "target environment" - which Kusion defines as `workspaces`, **stack** is a development-time concept for application developers to manage different configurations. One way to illustrate the difference is that you can easily be deploying the `prod` stack to multiple target environments, for example, `aws-prod-us-east`, `aws-prod-us-east-2` and `azure-prod-westus`. + +## High Level Schema + +![High_Level_Schema](/img/docs/user_docs/concepts/high-level-schema.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/3-concepts/2-stack/2-configuration.md b/docs_versioned_docs/version-v0.11/3-concepts/2-stack/2-configuration.md new file mode 100644 index 00000000..00b4e9e1 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/2-stack/2-configuration.md @@ -0,0 +1,29 @@ +--- +id: configuration +sidebar_label: Stack file reference +--- + +# Kusion stack file reference + +Every Kusion project's stack has a stack file, `stack.yaml`, which specifies metadata about your stack, such as the stack name and stack description. The stack file must begin with lowercase `stack` and have an extension of either `.yaml` or `.yml`. + +## Attributes + +| Name | Required | Description | Options | +|:------------- |:--------------- |:------------- |:------------- | +| `name` | required | Name of the stack containing alphanumeric characters, hyphens, underscores. | None | +| `description` | optional | A brief description of the stack. 
| None | +| `extensions` | optional | List of extensions on the stack. | [See blow](#extensions) | + +### Extensions + +Extensions allow you to customize how resources are generated or customized as part of release. + +#### kubernetesNamespace + +The Kubernetes namespace extension allows you to customize namespace within your application generate Kubernetes resources. + +| Key | Required | Description | Example | +|:------|:--------:|:-------------|:---------| +| kind | y | The kind of extension being used. Must be 'kubernetesNamespace' | `kubernetesNamespace` | +| namespace | y | The namespace where all application-scoped resources generate Kubernetes objects. | `default` | \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/3-concepts/2-stack/_category_.json b/docs_versioned_docs/version-v0.11/3-concepts/2-stack/_category_.json new file mode 100644 index 00000000..6425c52e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/2-stack/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Stack" +} diff --git a/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/1-overview.md b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/1-overview.md new file mode 100644 index 00000000..b6487117 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/1-overview.md @@ -0,0 +1,16 @@ +# Overview + +A Kusion module is a reusable building block designed by platform engineers to standardize application deployments and enable app developers to self-service. It consists of two parts: + +- App developer-oriented schema: It is a [KCL schema](https://kcl-lang.io/docs/user_docs/guides/schema-definition/). Fields in this schema are recommended to be understandable to application developers and workspace-agnostic. For example, a database Kusion module schema only contains fields like database engine type and database version. +- Kusion module generator: It is a piece of logic that generates the Intent with an instantiated schema mentioned above, along with platform configurations ([workspace](../workspace)). As a building block, Kusion module hides the complexity of infrastructures. A database Kusion module not only represents a cloud RDS, but it also contains logic to configure other resources such as security groups and IAM policies. Additionally, it seamlessly injects the database host address, username, and password into the workload's environment variables. The generator logic can be very complex in some situations so we recommend implementing it in a GPL like [go](https://go.dev/). + +Here are some explanations of the Kusion Module: + +1. It represents an independent unit that provides a specific capability to the application with clear business semantics. +2. It consists of one or multiple infrastructure resources (K8s/Terraform resources), but it is not merely a collection of unrelated resources. For instance, a database, monitoring capabilities, and network access are typical Kusion Modules. +3. Modules should not have dependencies or be nested within each other. +4. AppConfig is not a Module. +5. Kusion Module is a superset of [KPM](https://www.kcl-lang.io/docs/user_docs/guides/package-management/quick-start). It leverages the KPM to manage KCL schemas in the Kusion module. 
+ +![kusion-module](/img/docs/concept/kusion-module.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/2-develop-guide.md b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/2-develop-guide.md new file mode 100644 index 00000000..a874675e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/2-develop-guide.md @@ -0,0 +1,166 @@ +# Platform Engineer Develop Guide + +## Prerequisites + +To follow this guide, you will need: + +- Go 1.22 or higher installed and configured +- Kusion v0.11.1 or higher installed locally + +## Workflow + +As a platform engineer, the workflow of developing a Kusion module looks like this: + +1. Communicate with app developers and identify the fields that should exposed to them in the dev-orient schema +2. Identify module input parameters that should be configured by platform engineers in the [workspace](../workspace) +3. Define the app dev-orient schema +4. Develop the module by implementing gRPC interfaces + +The first two steps primarily involve communication with the application development team, and the specific details are not included in this tutorial. This tutorial begins with the subsequent two steps. + +## Set up a developing environment + +Developing a Kusion module includes defining a KCL schema and developing a module binary in golang. We will provide a scaffold repository and a new command `kusion mod init` to help developers set up the developing environment easily. + +After executing the command + +```shell +kusion mod init +``` + +Kusion will download a [scaffold repository](https://github.com/KusionStack/kusion-module-scaffolding) and rename this project with your module name. The scaffold contains code templates and all files needed for developing a Kusion module. + +## Developing + +The scaffold repository directory structure is shown below: + +```shell +$ tree kawesome/ +. +├── example +│   ├── dev +│   │   ├── example_workspace.yaml +│   │   ├── kcl.mod +│   │   ├── main.k +│   │   └── stack.yaml +│   └── project.yaml +├── kawesome.k +├── kcl.mod +└── src + ├── Makefile + ├── go.mod + ├── go.sum + ├── kawesome_generator.go + └── kawesome_generator_test.go +``` + +When developing a Kusion module with the scaffold repository, you could follow the steps below: + +1. **Define the module name and version** + - For go files. Rename the module name in the `go.mod` and related files to your Kusion module name. + ```yaml + module kawsome + go 1.22 + require ( + ... + ) + ``` + - For KCL files. Rename package name and version in the `kcl.mod` + ```toml + [package] + name = "kawesome" + version = 0.1.0 + ``` + + We assume the module named is `kawesome` and the version is `0.1.0` in this guide. + +2. **Define the dev-orient schemas**. They would be initialized by app developers. In this scaffold repository, we've defined a schema named Kawesome in `kawesome.k` that consists of two resources `Service` and `RandomPassword` and they will be generated into a Kubernetes Service and a Terraform RandomPassword later. + +```python +schema Kawesome: +""" Kawesome is a sample module schema consisting of Service +and RandomPassword + +Attributes +---------- +service: Service, default is Undefined, required. + The exposed port of Workload, which will be generated into Kubernetes Service. +randomPassword: RandomPassword, default is Undefined, required. + The sensitive random string, which will be generated into Terraform random_password. 
+ +Examples +-------- +import kawesome as ks + +accessories: { + "kusionstack/kawesome@v0.1.0": ks.Kawesome { + service: ks.Service { + port: 8080 + } + randomPassword: ks.RandomPassword { + length: 20 + } + } +} +""" + +# The exposed port of Workload, which will be generated into Kubernetes Service. +service: Service + +# The sensitive random string, which will be generated into Terraform random_password. +randomPassword: RandomPassword +``` + +3. **Implement the gRPC generate interface.** The `generate` interface consumes the application developer's config described in the [`AppConfiguration`](../app-configuration) and the platform engineer's config described in the [`workspace`](../workspace) to generate all infrastructure resources represented by this module. + +```go +func (k *Kawesome) Generate(_ context.Context, request *module.GeneratorRequest) (*module.GeneratorResponse, error){ + // generate your infrastructure resoruces +} + +// Patcher contains fields should be patched into the workload corresponding fields +type Patcher struct { + // Environments represent the environment variables patched to all containers in the workload. + Environments []v1.EnvVar `json:"environments" yaml:"environments"` + // Labels represent the labels patched to both the workload and pod. + Labels map[string]string `json:"labels" yaml:"labels"` + // Annotations represent the annotations patched to both the workload and pod. + Annotations map[string]string `json:"annotations" yaml:"annotations"` +} +``` + +The `GeneratorRequest` contains the application developer's config, platform engineer's config, workload config and related metadata a module could need to generate infrastructure resources. +In the `GeneratorResponse`, there are two fields, `Resources` and `Patchers`. The `Resource` represents resources that should operated by Kusion and they will be appended into the [Spec](../spec). The `Patchers` are used to patch other resources. In this version, Kusion will parse them and patch workload corresponding fields. + +## Publish + +Publish the Kusion module to an OCI registry with the command `kusion mod push`. + +Publish a stable version +```shell +kusion mod push /path/to/your-module oci://ghcr.io/kusionstack --latest=true --creds +``` + +Publish a pre-release version +```shell +kusion mod push /path/to/your-module oci://ghcr.io/kusionstack --latest=false --creds +``` + +:::info +The OCI URL format is `oci:///` and please ensure that your token has the appropriate permissions to write to the registry. +::: + +More details can be found in the `kusion mod push` reference doc. + +## Initialize the workspace + +```yaml +modules: + kusionstack/kawesome@0.1.0: + default: + service: + labels: + kusionstack.io/module-name: kawesome +``` + +Initialize module platform configuration in the `workspace.yaml` to standardize the module's behavior. 
Please notice the key of this module should match this format: `namespace/moduleName@version` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/3-app-dev-guide.md b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/3-app-dev-guide.md new file mode 100644 index 00000000..68c7a410 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/3-app-dev-guide.md @@ -0,0 +1,97 @@ +# Application Developer User Guide + +## Choose modules you need + +For all KusionStack built-in modules, you can find all available versions and documents in the [reference](../../6-reference/2-modules/index.md) + +## Import and initialize modules + +### Add dependencies + +Taking `kawesome` as an example, the directory structure is shown below: + +```shell +example +├── dev +│   ├── example_workspace.yaml +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +Before importing modules in your AppConfiguration, you should add them to the dependencies part of the `kcl.mod` file. + +``` toml +[package] +name = "example" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +kawesome = { oci = "oci://ghcr.io/kusionstack/kawesome", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +The kam dependency represents the [Kusion Application Module](https://github.com/KusionStack/kam.git) which contains the AppConfiguration and other basic modules. The `kawesome` is the Kusion module we are going to use in the AppConfiguration. + +### Initialize modules + +```python +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import kawesome.kawesome as ks + +kawesome: ac.AppConfiguration { + # Declare the workload configurations. + workload: wl.Service { + containers: { + kawesome: c.Container { + image: "hashicorp/http-echo" + } + } + } + # Declare the kawesome module configurations. + accessories: { + "kawesome": ks.Kawesome { + service: ks.Service{ + port: 5678 + } + } + } +} +``` + +Initialize the `kawesome` module in the `accessories` block of the AppConfiguration. The key of the `accessories` item represents the module name and the value represents the actual module you required. + +## Preview the result + +Execute the preview command to validate the result. + +```shell +kusion preview +``` + +```shell + ✔︎ Generating Spec in the Stack dev... +Stack: dev +ID Action +hashicorp:random:random_password:example-dev-kawesome Create +v1:Namespace:example Create +v1:Service:example:example-dev-kawesome Create +apps.kusionstack.io/v1alpha1:PodTransitionRule:example:example-dev-kawesome Create +apps.kusionstack.io/v1alpha1:CollaSet:example:example-dev-kawesome Create + + +? Which diff detail do you want to see? 
[Use arrows to move, type to filter] +> all + hashicorp:random:random_password:example-dev-kawesome Create + v1:Namespace:example Create + v1:Service:example:example-dev-kawesome Create + apps.kusionstack.io/v1alpha1:PodTransitionRule:example:example-dev-kawesome Create + apps.kusionstack.io/v1alpha1:CollaSet:example:example-dev-kawesome Create + cancel + +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/_category_.json b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/_category_.json new file mode 100644 index 00000000..a346baad --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/3-kusion-module/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Module" +} diff --git a/docs_versioned_docs/version-v0.11/3-concepts/4-workspace.md b/docs_versioned_docs/version-v0.11/3-concepts/4-workspace.md new file mode 100644 index 00000000..74393741 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/4-workspace.md @@ -0,0 +1,206 @@ +--- +id: workspace +sidebar_label: Workspace +--- + +# Workspace + +Workspace is a logical concept that maps to an actual target environment to deploy a stack to. In today's context, this _usually_ represents a Kubernetes cluster for the application workload and an optional cloud account to provision infrastructure resources that the workload depends on (A database, for example). Aside from the deployment destination, workspaces are also designed to be associated with a series of platform configurations that are used by all the stacks deployed to said workspace. + +When executing the command `kusion generate`, Kusion will "match" the AppConfiguration and the approriate workspace configuration to dynamically generate the `Spec`, which contains the complete manifest to describe the resources in the stack. The relationship of the Project, Stack and Workspace is shown as below. Notice that all three ways to organize stacks are valid. + +![project-stack-workspace](/img/docs/concept/project-stack-workspace.png) + +Workspace is designed to address separation of concerns. As opposed to the development-time concept of a "stack", a workspace is a deploy-time concept that represents a deployment target, a.k.a an actual runtime environment. Workspaces should be entirely managed by Platform Engineers to alleviate the burden on developers to understand environment-specific details. + +To dig a little deeper, a workspace represents the need for a distinct set of "platform opinions". That includes things that application developer either don't want to or shouldn't need to worry about, such as which Kubernetes cluster to deploy to, the credentials to deploy to said clusters, and other infrastructure details like what database instance to provision. + +Workspace is intended to be flexible so you can map them as your see fit to the boundaries that are relevant to your use case. For example, you can map a workspace to a cloud region (aws-us-east-1), provided that regional isolation is sufficient for you (this is an extreme case). Alternatively, a workspace can be map to the combination of a cloud region and an SDLC phase (aws-dev-us-east-1), provided that it represents the right boundary from a platform perspective. + +The workspace configuration is in a deterministic format and currently written in YAML. The subcommands of `kusion workspace` are provided to manage the workspaces. When using `kusion workspace`, the workspace configuration will be saved as YAML file in local file system. 
To avoid the possible risks, the environment variables are provided to hold the sensitive data such as Access Keys and Secret keys. + +## Workspace Configuration + +The configuration of a Workspace is stored in a single YAML file, which consists of `modules` and `runtimes`. An example of Workspace configuration is shown as below. + +```yaml +# The platform configuration for Modules or KAMs. +# For each Module or KAM, the configuration format is as below. +# # ${module_identifier} or ${KAM_name}: +# # default: # default configuration, applied to all projects +# # ${field1}: ${value1} +# # #{field2}: ${value2} +# # ... +# # ${patcher_name}: #patcher configuration, applied to the projects assigned in projectSelector +# # ${field1}: ${value1_override} +# # ... +# # projectSelector: +# # - ${project1_name} +# # - ${project2_name} +# # ... +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: ${mysql_subnet_id} + databaseName: kusion + largeSize: + size: 50 + projectSelector: + - foo + - bar + +# The configuration of Runtimes, support Kubernetes and Terraform. +# For each Runtime, the configuration format is as below. +# # ${runtime_name}: +# # ${field1}: ${value1} +# # ${field2}: ${value2} +# # ... +runtimes: + kubernetes: + kubeConfig: /etc/kubeconfig.yaml + terraform: + aws: + version: 1.0.4 + source: hashicorp/aws + region: us-east-1 +``` + +### modules + +The `modules` are the platform-part configurations of Modules and KAMs, where the identifier of them are `${namespace}/${module_name}@${module_tag}` and `${kam_name}`. For each Module or KAM configuration, it is composed of a `default` and several `patcher` blocks. The `default` block contains the universal configuration of the Workspace, and can be applied to all Stacks in the Workspace, which is composed of the values of the Module's or KAM's fields. The `patcher` block contains the exclusive configuration for certain Stacks, which includes not only the fields' values, but also the applied Projects. + +The `patcher` block is designed to increase the flexibility for platform engineers managing Workspaces. Cause the Workspace should map to the real physical environment, in the actual production practice, it's almost impossible that all the Stacks share the same platform configuration, although we want them the same. + +The values of the same fields in `patcher` will override the `default`, and one field in multiple patchers is forbidden to assign to the same Project. That is, if there are more than one `patcher` declaring the same field with different values, the applied Projects are prohibited to overlap. And, The name of `patcher` must not be `default`. + +In the `patcher`, the applied Projects are assigned by the field `ProjectSelector`, which is an array of the Project names. The `ProjectSelector` is provided rather than something may like `StackSelector`, which specifies the applied Stacks. Here are the reasons. Explaining from the perspective of using Workspace, the mapping of Workspace and Stack is specified by the Kusion operation commands' users. While explaining from the perspective of the relationship among Project, Stack and Workspace, Workspace is designed for the reuse of platform-level configuration among multiple Projects. When a Project "encounters" a Workspace, it becomes a "Stack instance", which can be applied to a series of real resources. 
If something like a `StackSelector` were used instead, this reuse could not be realized, and the Workspace would lose its relevance. For more information about the relationship, please refer to [Project](project/overview) and [Stack](stack/overview).
+
+Different Modules and KAMs have different names, fields, and corresponding formats and restrictions. When writing the configuration, check the corresponding Module's or KAM's description, and make sure all the requisite Modules and KAMs are correctly configured. Please refer to [Kusion Module](kusion-module/overview) for more information. The example above gives a sample of the Module `mysql`.
+
+### runtimes
+
+The `runtimes` are the interfaces through which Kusion interacts with the real infrastructure, and they are only configured by the platform engineers in the Workspace. Kusion supports the `Kubernetes` and `Terraform` runtimes for now.
+
+For the `Kubernetes` runtime, the path of the KubeConfig file is specified by the field `kubeConfig`. Besides, the environment variable `KUBECONFIG` is also supported with higher priority. If neither is set, the default path `$HOME/.kube/config` will be used. In the example above, `kubeConfig` is set in the workspace configuration.
+
+The `Terraform` runtime is composed of multiple Terraform providers' configurations, where the key is the provider name, and the values vary across different providers. For the configuration fields, Kusion keeps the same names as Terraform, including the supported environment variables. Please refer to the [Terraform Registry](https://registry.terraform.io/) for more information. In the example above, a sample of the aws runtime configuration is given, while the `access_key` and `access_secret` are not set in the Workspace file and are expected to be set by the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+
+## Managing Workspace
+
+The subcommands of `kusion workspace` are used to manage Workspaces, including `create`, `show`, `list`, `switch`, `update` and `delete`. Because the Workspace configurations are stored persistently, the current or a specified Backend will be used. For more information about Backends, please refer to [Backend](backend).
+
+Kusion automatically creates a `default` Workspace with an empty configuration in every Backend and sets it as the current one. When first using Kusion, or when no Workspace has been configured, the `default` Workspace will be used.
+
+### Creating Workspace
+
+Use `kusion workspace create ${name} -f ${configuration_file_path}` to create a new Workspace with the configuration in a YAML file. The Workspace is identified by its `name`, which must be a new one, and the configuration must be written in a YAML file with the correct format.
+
+The command above creates the Workspace in the current Backend. To create a Workspace in another backend, please use the flag `--backend` to specify it. Workspace names within a Backend must be unique, but the same name may be used in different Backends.
+
+In some scenarios, a newly created Workspace is expected to become the current one. For convenience, the flag `--current` is provided to set the Workspace as current alongside the creation.
+
+Note that creating the `default` Workspace is not allowed, because it is created by Kusion automatically.
+
+The example is shown as below.
+
+```shell
+# create a workspace in the current backend
+kusion workspace create dev -f dev.yaml
+
+# create a workspace in the current backend and set it as the current
+kusion workspace create dev -f dev.yaml --current
+
+# create a workspace in a specified backend
+kusion workspace create dev -f dev.yaml --backend oss-pre
+```
+
+The Workspaces to create are decided by the platform engineers. We recommend organizing them by the following rules:
+
+- **SDLC phases**, such as `dev`, `pre`, `prod`;
+- **cloud vendors**, such as `aws`, `alicloud`;
+- combination of the two above, such as `dev-aws`, `prod-alicloud`.
+
+By design, Kusion does not support deploying a Stack to multiple clouds or regions within a single Workspace. While users can technically define a Module that provisions resources across multiple clouds or regions, Kusion does not recommend this practice, and will not provide technical support for such configurations. If the platform engineers need to manage resources across multiple clouds or regions, they should create separate Workspaces.
+
+### Listing Workspace
+
+Use `kusion workspace list` to get all the workspace names.
+
+The example is shown as below. For simplicity, the following examples do not specify a backend, which can still be done with the `--backend` flag.
+
+```shell
+# list all the workspace names
+kusion workspace list
+```
+
+### Switching Workspace
+
+In order not to specify the Workspace name for each Kusion operation command, `kusion workspace switch ${name}` is provided to switch the current Workspace. Then, when executing `kusion generate`, the current Workspace will be used. The Workspace to switch to must already exist.
+
+The example is shown as below.
+
+```shell
+# switch workspace
+kusion workspace switch dev
+```
+
+### Showing Workspace
+
+Use `kusion workspace show ${name}` to get the Workspace configuration. If the `name` is not specified, the configuration of the current Workspace will be returned.
+
+The example is shown as below.
+
+```shell
+# show a specified workspace configuration
+kusion workspace show dev
+
+# show the current workspace configuration
+kusion workspace show
+```
+
+### Updating Workspace
+
+When a Workspace needs to be updated, use `kusion workspace update ${name} -f ${configuration_file_path}` to update it with the new configuration file. The whole updated configuration must be provided, and the Workspace must already exist. The recommended steps are to get the Workspace configuration first, refresh the configuration, and then execute the command. If the `name` is not specified, the current Workspace will be used.
+
+Updating the `default` Workspace is allowed, and the flag `--current` is also supported to set it as the current.
+
+The example is shown as below.
+ +```shell +# delete a specified workspace +kusion workspace delete dev + +# delete the current workspace +kusion workspace delete +``` + +## Using Workspace + +Workspace is used in the command `kusion generate`, the following steps help smooth the operation process. + +1. Write the Workspace configuration file with the format shown above, and fulfill all the necessary fields; +2. Create the workspace with `kusion workspace create`, then Kusion perceives the Workspace. The flag `--current` can be used to set it as the current. +3. Execute `kusion generate` in a Stack to generate the whole Spec, the AppConfiguration and Workspace configuration get rendered automatically, and can be applied to the real infrastructure. If the appointed Workspace or Backend is asked, the flags `--workspace` and `--backend` will help achieve that. +4. If the Workspace needs to update, delete, switch, etc. Use the above commands to achieve that. diff --git a/docs_versioned_docs/version-v0.11/3-concepts/5-appconfiguration.md b/docs_versioned_docs/version-v0.11/3-concepts/5-appconfiguration.md new file mode 100644 index 00000000..8e40bd47 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/5-appconfiguration.md @@ -0,0 +1,38 @@ +--- +id: app-configuration +sidebar_label: AppConfiguration +--- + +# AppConfiguration + +As a modern cloud-native application delivery toolchain, declarative intent-based actuation is the central idea of Kusion, and `AppConfiguration` model plays the role of describing the intent, which provides a simpler path for on-boarding developers to the platform without leaking low level details in runtime infrastructure and allows developers to fully focus on the application logic itself. + +The `AppConfiguration` model consolidates workload and their dependent accessories for the application deployment, along with any pipeline and operational requirements into one standardized, infrastructure-independent declarative specification. This declarative specification represents the intuitive user intent for the application, which drives a standardized and efficient application delivery and operation process in a hybrid environment. + +![appconfig.png](/img/docs/concept/appconfig.png) + +AppConfiguration consists of four core concepts, namely `Workload`, `Accessory`, `Pipeline`, and `Dependency`. We will walk through these concepts one by one. + +#### Workload + +Workload is a representation of the business logic that runs in the cluster. Common workload types include long-running services that should “never” go down and batch jobs that take from a few seconds to a few days to complete. A valid AppConfiguration instance must include at least one Workload, which is made of one or more containers, along with their configurations, such as the container image, environment variables, and resource requirements. + +In most cases, a Workload is a backend service or the frontend of an Application. For example, in a micro-service architecture, each service would be represented by a distinct Workload. This allows developers to manage and deploy their code in a more organized and efficient manner. + +#### Accessory + +Using the analogy of a car, workload is the core engine of application, but only having the engine isn’t enough for the application to function properly. In most cases there must be other supporting parts for the workload to operate as intended. For those supporting parts we call them Accessory. 
Accessory refers to various runtime capabilities and operational requirements provided by the underlying infrastructure, such as database, network load-balancer, storage and so on. + +From the perspective of team collaboration, the platform team should be responsible for creating and maintaining various accessory definitions, providing reusable building blocks out-of-the-box. Application developers just need to leverage the existing accessories to cover the evolving application needs. This helps software organizations achieve separation of concern, so that different roles can focus on the subject matter they are an expert of. + +#### Pipeline + +Running reliable applications requires reliable delivery pipelines. By default, Kusion provides a relatively fixed built-in application delivery pipeline, which should be sufficient for most use cases. However, as the application scale and complexity grows, so does the need for a customizable delivery pipeline. Developers wish for more fine-tuned control and customization over the workflow to delivery their applications. That’s why we introduced the Pipeline section in AppConfiguration model. + +A customized delivery pipeline is made of several steps, each corresponds to an operation that needs to be executed, such as running certain tests after a deployment, scanning artifacts for vulnerabilities prior to a deployment, and so on. Implementation-wise, the execution of each step should be carried out in the form of a plugin, developed and managed by the platform owners. + +#### Topologies + +Application dependencies refer to the external services or other software that an application relies on in order to function properly. These dependencies may be required in order to provide certain functionality or to use certain features in the application. + +Similar to declaring a dependency from an application to an accessory, AppConfiguration lets you declare the dependencies between different applications in the same way. diff --git a/docs_versioned_docs/version-v0.11/3-concepts/6-spec.md b/docs_versioned_docs/version-v0.11/3-concepts/6-spec.md new file mode 100644 index 00000000..4f920255 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/6-spec.md @@ -0,0 +1,22 @@ +--- +id: spec +sidebar_label: Spec +--- + +# Spec + +The Spec represents the operational intentions that you aim to deliver using Kusion. These intentions are expected to contain all components throughout the DevOps lifecycle, including resources (workload, database, load balancer, etc.), dependencies, and policies. The Kusion module generators are responsible for converting all AppConfigurations and environment configurations into the Spec. Once the Spec is generated, the Kusion Engine takes charge of updating the actual infrastructures to match the Spec. + +## Purpose + +### Single Source of Truth + +In Kusion's workflow, the platform engineer builds Kusion modules and provides environment configurations, application developers choose Kusion modules they need and deploy operational intentions to an environment with related environment configurations. They can also input dynamic parameters like the container image when executing the `kusion generate` command. So the final operational intentions include configurations written by application developers, environment configurations and dynamic inputs. Due to this reason, we introduce **Spec** to represent the SSoT(Single Source of Truth) of Kusion. 
It is the result of `kusion generate` which contains all operational intentions from different sources. + +### Consistency + +Delivering an application to different environments with identical configurations is a common practice, especially for applications that require scalable distribution. In such cases, an immutable configuration package is helpful. By utilizing the Spec, all configurations and changes are stored in a single file. As the Spec is the input of Kusion, it ensures consistency across different environments whenever you execute Kusion with the same Spec file. + +### Rollback and Disaster Recovery + +The ability to roll back is crucial in reducing incident duration. Rolling back the system to a previously validated version is much faster compared to attempting to fix it during an outage. We regard a validated Spec as a snapshot of the system and recommend storing the Spec in a version control system like Git. This enables better change management practices and makes it simpler to roll back to previous versions if needed. In case of a failure or outage, having a validated Spec simplifies the rollback process, ensuring that the system can be quickly recovered. diff --git a/docs_versioned_docs/version-v0.11/3-concepts/7-backend.md b/docs_versioned_docs/version-v0.11/3-concepts/7-backend.md new file mode 100644 index 00000000..2538edc9 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/7-backend.md @@ -0,0 +1,284 @@ +--- +id: backend +sidebar_label: Backend +--- + +# Backend + +Backend is Kusion's storage, which defines the place to store Workspace, Spec and State. By default, Kusion uses the `local` type of backend to store the state on the local disk. While in the scenario of team collaboration, the Workspace, Spec and State can be stored on a remote backend, such as `mysql`, `oss` and `s3`, to allow multiple users' access. + +The command `kusion config` is used to configure the backend configuration. Configuring a whole backend or an individual config item are both supported. For the sensitive data, the environment variables are supported, and with higher priority. + +Furthermore, Kusion provides the operation of setting current backend. Thus, the trouble of specifying backend can be saved when executing operation commands and managing `workspace`. + +## Available Backend Types + +There are four available backend types: `local`, `mysql`, `oss`, `s3`. + +### local + +The `local` type backend uses local file system as storage, which is suitable for local operations, but not ideal for multi-user collaboration. The supported config items are as below. + +- **path**: `type string`, `optional`, specify the directory to store the Workspace, Spec, and State files. The subdirectories `workspaces`, `specs` and `states` are used to store the corresponding files separately. It's recommended to use an empty or a Kusion exclusive directory as the local backend path. If not set, the default path `${KUSION_HOME}` is in use. + +The whole local type backend configuration is as below. + +```yaml +{ + "type": "local", + "configs": { + "path": "${local_path}" # type string, optional, the directory to store files. + } +} +``` + +### mysql + +The `mysql` type backend uses mysql database as storage. The supported config items are as below. + +- **dbName**: `type string`, `required`, the name of the database. +- **user**: `type string`, `required`, the username of the database. 
+- **password**: `type string`, `optional`, the password of the database, support declaring by environment variable `KUSION_BACKEND_MYSQL_PASSWORD`.
+- **host**: `type string`, `required`, the access address for the database.
+- **port**: `type int`, `optional`, the port of the database. If not set, the default value `3306` will be used.
+
+Please note that the mysql type backend does not support storing the Spec for now. For Workspace and State, the tables `workspace` and `state` are used to store the corresponding content separately, and their structures are fixed. The table structures are shown below.
+
+Note that there are no fields `id`, `gmt_create(created_at)`, `gmt_modified(updated_at)`, etc., which are usually controlled automatically by the database. Kusion does not use these fields, and their existence does not affect the normal operation of Kusion. The length of the varchar columns can be changed according to the real scenario.
+
+```sql
+-- table workspace
+CREATE TABLE `workspace` (
+  `workspace` varchar(127) NOT NULL COMMENT 'workspace name',
+  `content` longtext NOT NULL COMMENT 'workspace content, in JSON format',
+  `is_current` tinyint(1) DEFAULT NULL COMMENT 'whether it is the current workspace',
+  UNIQUE KEY `uk_workspace` (`workspace`),
+  KEY `idx_is_current` (`is_current`)
+);
+
+-- table state
+CREATE TABLE `state` (
+  `project` varchar(127) NOT NULL COMMENT 'project name',
+  `stack` varchar(127) NOT NULL COMMENT 'stack name',
+  `workspace` varchar(127) NOT NULL COMMENT 'workspace name',
+  `content` longtext NOT NULL COMMENT 'state content, in JSON format',
+  UNIQUE KEY `uk_state` (`project`, `stack`, `workspace`)
+);
+```
+
+The whole mysql type backend configuration is as below.
+
+```yaml
+{
+  "type": "mysql",
+  "configs": {
+    "dbName": "${mysql_db_name}", # type string, required, the database name.
+    "user": "${mysql_user}", # type string, required, the database user.
+    "password": "${mysql_password}", # type string, optional, the database password.
+    "host": "${mysql_host}", # type string, required, the database host.
+    "port": "${mysql_port}" # type string, optional, the database port. If not set, use the default port 3306.
+  }
+}
+```
+
+The supported environment variable is as below.
+
+```bash
+export KUSION_BACKEND_MYSQL_PASSWORD="${mysql_password}" # configure password
+```
+
+### oss
+
+The `oss` type backend uses the Alicloud Object Storage Service (OSS) as storage. The supported config items are as below.
+
+- **endpoint**: `type string`, `required`, specify the access endpoint for alicloud oss bucket.
+- **accessKeyID**: `type string`, `required`, specify the alicloud account accessKeyID, support declaring by environment variable `OSS_ACCESS_KEY_ID`.
+- **accessKeySecret**: `type string`, `required`, specify the alicloud account accessKeySecret, support declaring by environment variable `OSS_ACCESS_KEY_SECRET`.
+- **bucket**: `type string`, `required`, specify the name of the alicloud oss bucket.
+- **prefix**: `type string`, `optional`, constitute the prefix to store the Workspace, Spec, and State files, whose prefixes are `${prefix}/workspaces`, `${prefix}/specs` and `${prefix}/states` respectively. Using a prefix can create a "dedicated space" for the Kusion data, which is beneficial for the management and reuse of the bucket. If not set, there is no prefix, and the files are stored in the root path of the bucket, analogous to a file system.
+ +Noted that `accessKeyID` and `accessKeySecret` are required for the whole configuration combined by the configuration managed by the command `kusion config` and the environment variables. For the `kusion config` alone, they are not obligatory. And for the safety reason, using environment variables is the recommended way. + +The whole oss type backend configuration is as below. + +```yaml +{ + "type": "oss", + "configs": { + "endpoint": "${oss_endpoint}", # type string, required, the oss endpoint. + "accessKeyID": "${oss_access_key_id}", # type string, ooptional for the command "kusion config", the oss access key id. + "accessKeySecret": "${oss_access_key_secret}", # type string, optional for the command "kusion config", the oss access key secret. + "bucket": "${oss_bucket}", # type string, required, the oss bucket. + "prefix": "${oss_prefix}" # type string, optional, the prefix to store the files. + } +} +``` + +The supported environment variables are as below. + +```bash +export OSS_ACCESS_KEY_ID="${oss-access-key-id}" # configure accessKeyID +export OSS_ACCESS_KEY_SECRET="${oss-access-key-secret}" # configure accessKeySecret +``` + +### s3 + +The `s3` type backend uses the AWS Simple Storage Service (S3) as storage. The supported config items are as below. + +- **region**: `type string`, `required`, specify the region of aws s3 bucket, support declaring by environment variable `AWS_DEFAULT_REGION` or `AWS_REGION`, where the latter has higher priority. +- **endpoint**: `type string`, `optional`, specify the access endpoint for aws s3 bucket. +- **accessKeyID**: `type string`, `required`, specify the aws account accessKeyID, support declaring by environment variable `AWS_ACCESS_KEY_ID`. +- **accessKeySecret**: `type string`, `required`, specify the aws account.accessKeySecret, support declaring by environment variable `AWS_SECRET_ACCESS_KEY`. +- **bucket**: `type string`, `required`, specify the name of the aws s3 bucket. +- **prefix**: `type string`, `optional`, constitute the prefix to store the Workspace, Spec, and State files, whose prefixes are `${prefix}/workspaces`, `${prefix}/specs` and `${prefix}/states` respectively. + +Noted that `region`, `accessKeyID` and `accessKeySecret` are optional for the `kusion config` command. + +The whole s3 type backend configuration is as below. + +```yaml +{ + "type": "s3", + "configs": { + "region": "${s3_region}", # type string, optional for the command "kusion config", the aws region. + "endpoint": "${s3_endpoint}", # type string, optional, the aws endpoint. + "accessKeyID": "${s3_access_key_id}", # type string, optional for the command "kusion config", the aws access key id. + "accessKeySecret": "${s3_access_key_secret}", # type string, optional for the command "kusion config", the aws access key secret. + "bucket": "${s3_bucket}", # type string, required, the s3 bucket. + "prefix": "${s3_prefix}" # type string, optional, the prefix to store the files. + } +} +``` + +The supported environment variables are as below. + +```bash +export AWS_DEFAULT_REGION="${s3_region}" # configure region, lower priority than AWS_REGION +export AWS_REGION="${s3_region}" # configure region, higher priority than AWS_DEFAULT_REGION +export AWS_ACCESS_KEY_ID="${s3_access_key_id}" # configure accessKeyID +export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}" # configure accessKeySecret +``` + + +## Setting a Backend + +When there is a new backend or the backend configuration needs to update, use the command `kusion config set ${key} ${value}` to set a backend. 
A backend is identified by a unique name, and its whole configuration is made up of the backend type and its corresponding config items. + +Be attention, do not confuse backend with backend type. For example, a backend named `s3_prod` uses `s3` as its storage, the `s3_prod` is the backend, while the `s3` is the backend type. + +There are four configuration modes: + +- setting a whole backend +- setting a backend type +- setting a whole set of backend config items +- setting a backend config item + +A unique backend name is required to do the configuration. Take `s3` type backend with name `s3_prod` for an example to explain how these modes work. + +### Setting a Whole Backend + +The key to configure a whole backend is `backends.${name}`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. Enclosing the value in single quotation marks is a good choice, which can keep the format correct. + +```shell +# set a whole backend +kusion config set backends.s3_prod '{"type":"s3","configs":{"bucket":"kusion"}}' +``` + +### Setting a Backend Type + +The key to set a backend type is `backends.${name}.type`, whose value must be `local`, `mysql`, `oss` or `s3`. + +```shell +# set a backend type +kusion config set backends.s3_prod.type s3 +``` + +### Setting a Whole Set of Backend Config Items + +The key to set a whole set of backend config items is `backends.${name}.configs`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. The backend config must be set after the backend type, and corresponds to the backend type. + +```shell +# set a whole backend config +kusion config set backends.s3_prod.configs '{"bucket":"kusion"}' +``` + +### Setting a Backend Config Item + +The key to set a backend config item is `backends.${name}.configs.${item}`. The item name and value type both depend on the backend type. Like the whole backend config, the config item must be valid and set after the backend type. + +```shell +# set a backend config item +kusion config set backends.s3_prod.configs.bucket kusion +``` + +When executing `kusion config set`, the configuration will be stored in a local file. For security reason, the environment variables are supported to configure some config items, such as `password`, `accessKeyID`, `accessKeySecret`. Using environment variables rather than `kusion config` set to set sensitive data is the best practice. If both configured, the environment variables have higher priority. For details about the supported environment variables, please see above. + +Kusion has a default backend with `local` type and the path is `$KUSION_HOME`, whose name is exactly `default`. The `default` backend is forbidden to modification, that is setting or unsetting the default backend is not allowed. Besides, the keyword `current` is also used by Kusion itself, please do not use it as the backend name. + +## Unsetting a Backend + +When a backend is not in use, or the configuration is out of date, use the command `kusion config unset ${key}` to unset a backend or a specified config item. Same as the setting, there are also four modes of unsetting. + +- unsetting a whole backend +- unsetting a backend type +- unsetting a whole set of backend config items +- unsetting a backend config item + +When unsetting a whole backend, the backend must not be the current backend. When unsetting the backend type, the config items must be empty and the backend not be the current. 
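+
+By way of illustration, the four unset modes might look like this, reusing the `s3_prod` backend from the setting examples above:
+
+```shell
+# unset a backend config item
+kusion config unset backends.s3_prod.configs.bucket
+
+# unset the whole set of backend config items
+kusion config unset backends.s3_prod.configs
+
+# unset the backend type (the config items must already be empty)
+kusion config unset backends.s3_prod.type
+
+# unset the whole backend (the backend must not be the current one)
+kusion config unset backends.s3_prod
+```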
+
+Unsetting the `default` backend is forbidden.
+
+## Setting the Current Backend
+
+In order not to specify the backend for every operation command, Kusion provides the mechanism of setting a current backend, which is then used by default. This is very useful when you execute a series of Kusion operation commands, for they usually use the same backend.
+
+Use the command `kusion config set backends.current ${name}` to set the current backend, where `name` must be an already configured backend.
+
+```shell
+# set the current backend
+kusion config set backends.current s3_prod
+```
+
+Setting the current backend to `default` is legal. In fact, if there is no backend-related configuration, the current backend is the `default` backend.
+
+## Getting Backend Configuration
+
+Use the command `kusion config get ${key}` to get a whole backend configuration or a specified backend config item. The `key` is the same as in the setting and unsetting operations; the whole list can be found in [Configuration](configuration).
+
+```shell
+# get a whole backend
+kusion config get backends.s3_prod
+
+# get a specified config item
+kusion config get backends.s3_prod.configs.bucket
+```
+
+Besides, the command `kusion config list` can also be used, which returns the whole Kusion configuration, including the backend configuration.
+
+```shell
+# get the whole Kusion configuration
+kusion config list
+```
+
+## Using Backend
+
+The backend is used to store Workspace, Spec, and State. Thus, the following commands use the backend:
+
+- subcommands of `kusion workspace`: used to store the Workspace;
+- `kusion generate`: used to store the Spec;
+- `kusion preview`, `kusion apply`, `kusion destroy`: used to store the State.
+
+For all the commands above, the flag `--backend` is provided to specify the backend; otherwise the current backend is used. When using a backend, you usually need to provide the sensitive data via environment variables. The example is shown below.
+
+```shell
+# set environment variables of sensitive and other necessary data
+export AWS_REGION="${s3_region}"
+export AWS_ACCESS_KEY_ID="${s3_access_key_id}"
+export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}"
+
+# use current backend
+kusion apply
+
+# use a specified backend
+kusion apply --backend s3_prod
+```
diff --git a/docs_versioned_docs/version-v0.11/3-concepts/8-configuration.md b/docs_versioned_docs/version-v0.11/3-concepts/8-configuration.md
new file mode 100644
index 00000000..aaebdadb
--- /dev/null
+++ b/docs_versioned_docs/version-v0.11/3-concepts/8-configuration.md
@@ -0,0 +1,131 @@
+---
+id: configuration
+sidebar_label: Configuration
+---
+
+# Configuration
+
+Kusion can be configured with some global settings, which are separate from the AppConfiguration written by the application developers and the workspace configurations written by the platform engineers.
+
+The configurations are only relevant to Kusion itself and can be managed by the command `kusion config`. The configuration items are predefined and use a hierarchical format with full stops as separators, such as `backends.current`. For now, only the backend configurations are included.
+
+The configuration is stored in the file `${KUSION_HOME}/config.yaml`. For sensitive data, such as passwords, access key ids and secrets, setting them in the configuration file is not recommended; using the corresponding environment variables is safer.
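+
+For example, the sensitive backend settings documented later in this reference can be supplied through their environment variables instead of the config file (the values below are placeholders):
+
+```shell
+# keep sensitive values out of ${KUSION_HOME}/config.yaml by exporting them instead
+export KUSION_BACKEND_MYSQL_PASSWORD="${mysql_password}"   # mysql backend password
+export OSS_ACCESS_KEY_ID="${oss_access_key_id}"            # oss backend access key id
+export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}"     # s3 backend access key secret
+```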
+ +## Configuration Management + +Kusion provides the command `kusion config`, and its sub-commands `get`, `list`, `set`, `unset` to manage the configuration. The usages are shown as below: + +### Get a Specified Configuration Item + +Use `kusion config get` to get the value of a specified configuration item, only the registered item can be obtained correctly. The example is as below. + +```shell +# get a configuration item +kusion config get backends.current +``` + +### List the Configuration Items + +Use `kusion config list` to list all the Kusion configurations, where the result is in the YAML format. The example is as below. + +```shell +# list all the Kusion configurations +kusion config list +``` + +### Set a Specified Configuration Item + +Use `kusion config set` to set the value of a specified configuration item, where the type of the value of is also determinate. Kusion supports `string`, `int`, `bool`, `array` and `map` as the value type, which should be conveyed in the following format through CLI. + +- `string`: the original format, such as `local-dev`, `oss-pre`; +- `int`: convert to string, such as `3306`, `80`; +- `bool`: convert to string, only support `true` and `false`; +- `array`: convert to string with JSON marshal, such as `'["mysql","oss"]'`. To preserve the format, enclosing the string content in single quotes is a good idea, or there may be unexpected errors; +- `map`: convert to string with JSON marshal, such as `'{"path":"\etc"}'`. + +Besides the type, some configuration items have more setting requirements. The configuration item dependency may exist, that is, a configuration item must be set after another item. And there may exist more restrictions for the configuration values themselves. For example, the valid keys for the map type value, the data range for the int type value. For detailed configuration item information, please refer to the following content of this article. + +The example of setting configuration item is as blow. + +```shell +# set a configuration item of type string +kusion config set backends.pre.type mysql + +# set a configuration item of type int +kusion config set backends.pre.configs.port 3306 + +# set a configuration item of type map +kusion config set backends.prod `{"configs":{"bucket":"kusion"},"type":"s3"}` +``` + +### Unset a Specified Configuration Item + +Use `kusion config unset` to unset a specified configuration item. Be attention, some items have dependencies, which must be unset in a correct order. The example is as below. + +```shell +# unset a specified configuration item +kusion config unset backends.pre +``` + +## Backend Configurations + +The backend configurations define the place to store Workspace, Spec and State files. Multiple backends and current backend are supported to set. + +### Available Configuration Items + +- **backends.current**: type `string`, the current used backend name. It can be set as the configured backend name. If not set, the default local backend will be used. +- **backends.${name}**: type `map`, a total backend configuration, contains type and config items, whose format is as below. It can be unset when the backend is not the current. +```yaml +{ + "type": "${backend_type}", # type string, required, support local, mysql, oss, s3. + "configs": ${backend_configs} # type map, optional for type local, required for the others, the specific keys depend on the type, refer to the description of backends.${name}.configs. 
+} +``` +- **backends.${name}.type**: type `string`, the backend type, support `local`, `mysql`, `s3` and `oss`. It can be unset when the backend is not the current, and the corresponding `backends.${name}.configs` are empty. +- **backends.${name}.configs**: type `map`, the backend config items, whose format depends on the backend type and is as below. It must be set after `backends.${name}.type`. +```yaml +# type local +{ + "path": "${local_path}" # type string, optional, the directory to store the files. If not set, use the default path ${KUSION_HOME}. +} + +# type mysql + { + "dbName": "${mysql_db_name}", # type string, required, the database name. + "user": "${mysql_user}", # type string, required, the database user. + "password": "${mysql_password}", # type string, optional, the database password, which can be also obtained by environment variable KUSION_BACKEND_MYSQL_PASSWORD. + "host": "${mysql_host}", # type string, required, the database host. + "port": "${mysql_port}" # type string, optional, the database port. If not set, use the default port 3306. + } + +# type oss + { + "endpoint": "${oss_endpoint}", # type string, required, the oss endpoint. + "accessKeyID": "${oss_access_key_id}", # type string, optional, the oss access key id, which can be also obtained by environment variable OSS_ACCESS_KEY_ID. + "accessKeySecret": "${oss_access_key_secret}", # type string, optional, the oss access key secret, which can be also obtained by environment variable OSS_ACCESS_KEY_SECRET + "bucket": "${oss_bucket}", # type string, required, the oss bucket. + "prefix": "${oss_prefix}" # type string, optional, the prefix to store the files. + } + + # type s3 + { + "region": "${s3_region}", # type string, optional, the aws region, which can be also obtained by environment variables AWS_REGION and AWS_DEFAULT_REGION. + "endpoint": "${s3_endpoint}", # type string, optional, the aws endpoint. + "accessKeyID": "${s3_access_key_id}", # type string, optional, the aws access key id, which can be also obtained by environment variable AWS_ACCESS_KEY_ID. + "accessKeySecret": "${s3_access_key_secret}", # type string, optional, the aws access key secret, which can be also obtained by environment variable AWS_SECRET_ACCESS_KEY + "bucket": "${s3_bucket}", # type string, required, the s3 bucket. + "prefix": "${s3_prefix}" # type string, optional, the prefix to store the files. + } +``` +- **backends.${name}.configs.path**: type `string`, the path of local type backend. It must be set after `backends.${name}.type` and which must be `local`. +- **backends.${name}.configs.dbName**: type `string`, the database name of mysql type backend. It must be set after `backends.${name}.type` and which must be `mysql`. +- **backends.${name}.configs.user**: type `string`, the database user of mysql type backend. It must be set after `backends.${name}.type` and which must be `mysql`. +- **backends.${name}.configs.password**: type `string`, the database password of mysql type backend. It must be set after `backends.${name}.type` and which must be `mysql`. It can be also obtained by environment variable `KUSION_BACKEND_MYSQL_PASSWORD`. +- **backends.${name}.configs.host**: type `string`, the database host of mysql type backend. It must be set after `backends.${name}.type` and which must be `mysql`. +- **backends.${name}.configs.port**: type `int`, the database port of mysql type backend. It must be set after `backends.${name}.type` and which must be `mysql`. If not set, the default value `3306` will be used. 
+- **backends.${name}.configs.endpoint**: type `string`, the endpoint of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.accessKeyID**: type `string`, the access key id of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. For `oss`, it can be also obtained by environment variable `OSS_ACCESS_KEY_ID`; while for s3, it is `AWS_ACCESS_KEY_ID`. +- **backends.${name}.configs.accessKeySecret**: type `string`, the access key secret of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. For `oss`, it can be also obtained by environment variable `OSS_ACCESS_KEY_SECRET`; while for s3, it is `AWS_SECRET_ACCESS_KEY`. +- **backends.${name}.configs.bucket**: type `string`, the bucket of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.prefix**: type `string`, the prefix to store the files of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.region**: type `string`, the aws region of s3 type backend. It must be set after `backends.${name}.type` and which must be `s3`. It can be also obtained by environment variables `AWS_REGION` and `AWS_DEFAULT_REGION`, where the former is priority. diff --git a/docs_versioned_docs/version-v0.11/3-concepts/9-how-kusion-works.md b/docs_versioned_docs/version-v0.11/3-concepts/9-how-kusion-works.md new file mode 100644 index 00000000..1f2970e3 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/3-concepts/9-how-kusion-works.md @@ -0,0 +1,137 @@ +--- +id: how-kusion-works +sidebar_label: How Kusion Works? +--- + +# How Kusion Works? + +Kusion is the platform engineering engine of [KusionStack](https://github.com/KusionStack). It delivers intentions described with Kusion Modules defined in [Catalog](https://github.com/KusionStack/catalog) to Kubernetes, Clouds and On-Prem infrastructures. + +![arch](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/workflow.png) + +## Overview + +The workflow of KusionStack is illustrated in the diagram above, and it consists of three steps. The first step is `Write`, where platform engineers provide Kusion Modules and application developers write AppConfigurations based on the Kusion Modules to describe their operational intent. + +The second step is the `Generate` process, which results in the creation of the SSoT (Single Source of Truth), also known as the [Spec](spec) of the current operational task. If you need version management of the SSoT, we recommend you manage the Spec with a VCS (Version Control System) tool like git. + +The third step is `Apply` which makes the Spec effective. Kusion parses the operational intent based on the Spec produced in the previous step. Before applying the intent, Kusion will execute the Preview command (you can also execute this command manually) which will use a three-way diff algorithm to preview changes and prompt users to make sure all changes meet expectations; the Apply command will then actualize the operational intent onto various infrastructure platforms. Currently, it supports three runtimes: Terraform, Kubernetes, and on-prem infrastructures. + +As a user of Kusion, if you prefer not to be conscious of so many steps, you can simply use `kusion apply`, and Kusion will automatically execute all the aforementioned steps for you. 
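+
+If you do want to run the stages one by one, a minimal sketch from within a stack directory looks like the following (only the commands documented above are used; flags are omitted):
+
+```shell
+# Generate: produce the Spec (the SSoT) from AppConfiguration and the workspace configuration
+kusion generate
+
+# Preview: three-way diff of the changes the Spec would make, for confirmation
+kusion preview
+
+# Apply: actualize the operational intent onto the target infrastructures
+kusion apply
+```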
+ +## Platform Developer’s Workflow + +### Design Kusion Modules + +[Kusion Module](kusion-module/overview) is a reusable building block designed by platform engineers and contains two components: an application developer-oriented schema and a Kusion module generator. When platform engineers have developed a Kusion module, they can push it to a [catalog](https://github.com/KusionStack/catalog) repository to make it into a KCL package. + +Given a database Kusion module as an example, the schema definition is shown below and the generator logic can be found [here](https://github.com/KusionStack/catalog/tree/main/modules/mysql/src). + +```python +schema MySQL: + """ MySQL describes the attributes to locally deploy or create a cloud provider + managed mysql database instance for the workload. + + Attributes + ---------- + type: "local" | "cloud", defaults to Undefined, required. + Type defines whether the mysql database is deployed locally or provided by + cloud vendor. + version: str, defaults to Undefined, required. + Version defines the mysql version to use. + + Examples + -------- + Instantiate a local mysql database with version of 5.7. + + import catalog.models.schema.v1.accessories.mysql + + accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } + """ + + # The deployment mode of the mysql database. + type: "local" | "cloud" + + # The mysql database version to use. + version: str + +``` + +### Instantiate and Set Up Workspaces + +Each [workspace](workspace) includes a corresponding Platform config file maintained by platform engineers. +Platform engineers should instantiate all workspaces and fulfill all fields with platform default values. Kusion will merge the workspace configuration with AppConfiguration in the Stack of the same name. An example is as follows. + +```yaml +# MySQL configurations for AWS RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +The `mysql` block represents a Kusion module. The fields inside are parts of the inputs for the Kusion module generator. For more details about the workspace, please refer to the [workspace](workspace) section. + +## Application Developer’s Workflow + +### Instantiate AppConfiguration and Apply + +Application developers choose Kusion modules they need and instantiate them in the AppConfiguration to describe their operation intentions. We have built some built-in Kusion modules in the repository [Catalog](https://github.com/KusionStack/catalog) and we warmly welcome you to join us in building this ecosystem together. + +`main.k` is the **only** configuration maintained by application developers and schemas in this file are defined from the application developer's perspective to reduce their cognitive load. An example is as follows. 
+
+```python
+import kam.v1.app_configuration as ac
+import kam.v1.workload as wl
+import kam.v1.workload.container as c
+import network as n
+import mysql
+
+wordpress: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            wordpress: c.Container {
+                image: "wordpress:6.3"
+                env: {
+                    "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)"
+                    "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)"
+                    "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)"
+                    "WORDPRESS_DB_NAME": "mysql"
+                }
+                resources: {
+                    "cpu": "500m"
+                    "memory": "512Mi"
+                }
+            }
+        }
+        replicas: 1
+    }
+    accessories: {
+        "network": n.Network {
+            ports: [
+                n.Port {
+                    port: 80
+                }
+            ]
+        }
+        "mysql": mysql.MySQL {
+            type: "cloud"
+            version: "8.0"
+        }
+    }
+}
+```
+
+`workload` and the accessories such as `network` and `mysql` are all Kusion modules provided by platform engineers, and Kusion will eventually convert them into actual infrastructure API calls.
+
+Finally, application developers can deliver their operational intent to infrastructures with one command: `kusion apply`.
diff --git a/docs_versioned_docs/version-v0.11/3-concepts/_category_.json b/docs_versioned_docs/version-v0.11/3-concepts/_category_.json
new file mode 100644
index 00000000..bccddbf1
--- /dev/null
+++ b/docs_versioned_docs/version-v0.11/3-concepts/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Concepts"
+}
diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/1-overview.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/1-overview.md
new file mode 100644
index 00000000..dfd3064b
--- /dev/null
+++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/1-overview.md
@@ -0,0 +1,225 @@
+---
+id: overview
+---
+
+# Configuration File Overview
+
+Kusion consumes one or more declarative configuration files (written in KCL) that describe the application, and delivers the intent to the target runtime, including Kubernetes, clouds, or on-prem infrastructure.
+
+This documentation series walks you through the odds and ends of managing such configuration files.
+
+## Table of Content
+
+- [Configuration File Overview](#configuration-file-overview)
+  - [Table of Content](#table-of-content)
+  - [Directory Structure](#directory-structure)
+  - [AppConfiguration Model](#appconfiguration-model)
+  - [Authoring Configuration Files](#authoring-configuration-files)
+    - [Identifying KCL file](#identifying-kcl-file)
+    - [KCL Schemas and KAM](#kcl-schemas-and-kam)
+    - [Kusion Modules](#kusion-modules)
+    - [Import Statements](#import-statements)
+    - [Understanding kcl.mod](#understanding-kclmod)
+    - [Building Blocks](#building-blocks)
+    - [Instantiating an application](#instantiating-an-application)
+    - [Using `kusion init`](#using-kusion-init)
+    - [Using references](#using-references)
+
+## Directory Structure
+
+Kusion expects the configuration file to be placed in a certain directory structure because it might need some metadata (that is not stored in the application configuration itself) in order to proceed.
+
+:::info
+
+See [Project](../concepts/project/overview) and [Stack](../concepts/stack/overview) for more details about Project and Stack.
+::: + +A sample multi-stack directory structure looks like the following: +``` +~/playground$ tree multi-stack-project/ +multi-stack-project/ +├── README.md +├── base +│   └── base.k +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +├── prod +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +In general, the directory structure follows a hierarchy where the top-level is the project configurations, and the sub-directories represent stack-level configurations. + +You may notice there is a `base` directory besides all the stacks. The `base` directory is not mandatory, but rather a place to store common configurations between different stacks. A common pattern we observed is to use stacks to represent different stages (dev, stage, prod, etc.) in the software development lifecycle, and/or different deployment targets (azure-eastus, aws-us-east-1, etc). A project can have as many stacks as needed. + +In practice, the applications deployed into dev and prod might very likely end up with a similar set of configurations except a few fields such as the application image (dev might be on newer versions), resource requirements (prod might require more resources), etc. + +As a general best practice, we recommend managing the common configurations in `base.k` as much as possible to minimize duplicate code. We will cover how override works in [Base and Override](base-override). + +## AppConfiguration Model + +`AppConfiguration` is the out-of-the-box model we build that describes an application. It serves as the declarative intent for a given application. + +The schema for `AppConfiguration` is defined in the [KusionStack/kam](https://github.com/KusionStack/kam/blob/main/v1/app_configuration.k) repository. It is designed as a unified, application-centric model that encapsulates the comprehensive configuration details and in the meantime, hides the complexity of the infrastructure as much as possible. + +`AppConfiguration` consists of multiple sub-components that each represent either the application workload itself, its dependencies (in the form of [Kusion Modules](../concepts/kusion-module/overview)), relevant workflows or operational expectations. We will deep dive into the details on how to author each of these elements in this upcoming documentation series. + +For more details on the `AppConfiguration`, please refer to the [design documentation](../concepts/app-configuration). + +## Authoring Configuration Files + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open-source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +### Identifying KCL file + +KCL files are identified with `.k` suffix in the filename. + +### KCL Schemas and KAM + +Similar to most modern General Programming Languages (GPLs), KCL provide packages that are used to organize collections of related KCL source files into modular and re-usable units. + +In the context of Kusion, we abstracted a core set of KCL Schemas (such as the aforementioned `AppConfiguration`, `Workload`, `Container`, etc)that represent the concepts that we believe that are relatively universal and developer-friendly, also known as [Kusion Application Model](https://github.com/KusionStack/kam), or KAM. 
+ +### Kusion Modules + +To extend the capabilities beyond the core KAM model, we use a concept known as [Kusion Modules](../concepts/kusion-module/overview) to define components that could best abstract the capabilities during an application delivery. We provide a collection of official out-of-the-box Kusion Modules that represents the most common capabilities. They are maintained in [KusionStack's GitHub container registry](https://github.com/orgs/KusionStack/packages). When authoring an application configuration file, you can simply declare said Kusion Modules as dependencies and import them to declare ship-time capabilities that the application requires. + +If the modules in the KusionStack container registry does not meet the needs of your applications, Kusion provides the necessary mechanisms to extend with custom-built Kusion Modules. You can always create and publish your own module, then import the new module in your application configuration written in KCL. + +For the steps to develop your own module, please refer to the Module developer guide. + +### Import Statements + +An example of the import looks like the following: +``` +### import from the official kam package +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c + +### import kusion modules +import monitoring as m +import network.network as n +``` + +Take `import kam.v1.workload as wl` as an example, the `.v1.workload` part after `import kam` represents the relative path of a specific schema to import. In this case, the `workload` schema is defined under `v1/workload` directory in the `kam` package. + +### Understanding kcl.mod + +Much similar to the concept of `go.mod`, Kusion uses `kcl.mod` as the source of truth to manage metadata (such as package name, dependencies, etc.) for the current package. Kusion will also auto-generate a `kcl.mod.lock` as the dependency lock file. + +The most common usage for `kcl.mod` is to manage the dependency of your application configuration file. + +:::info + +Please note this `kcl.mod` will be automatically generated if you are using `kusion init` to initialize a project with a template. You will only need to modify this file if you are modifying the project metadata outside the initialization process, such as upgrading the dependency version or adding a new dependency altogether, etc. +:::info + +There are 3 sections in a `kcl.mod` file: +- `package`, representing the metadata for the current package. +- `dependencies`, describing the packages the current package depends on. Supports referencing either a git repository or an OCI artifact. +- `profile`, defining the behavior for Kusion. In the example below, it describes the list of files Kusion should look for when parsing the application configuration. + +An example of `kcl.mod`: +``` +[package] +name = "multi-stack-project" +edition = "0.5.0" +version = "0.1.0" + +[dependencies] +monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.1.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +# Uncomment the line below to use your own modified module +# my-module = { oci = "oci://ghcr.io/my-repository/my-package", tag = "my-version" } + +[profile] +entries = ["../base/base.k", "main.k"] +``` + +### Building Blocks + +Configuration files consist of building blocks that are made of instances of schemas. An `AppConfiguration` instance consists of several child schemas, most of which are optional. The only mandatory one is the `workload` instance. 
We will take a closer look in the [workload walkthrough](workload). The order of the building blocks does NOT matter. + +The major building blocks as of version `0.11.0`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container {} + ... + } + secrets: {} + ... + } + # optional dependencies, usually expressed in kusion modules + accessories: { + ... + } + ... +} +``` + +We will deep dive into each one of the building blocks in this documentation series. + +### Instantiating an application + +In Kusion's out-of-the-box experience, an application is identified with an instance of `AppConfiguration`. You may have more than one application in the same project or stack. + +Here's an example of a configuration that can be consumed by Kusion (assuming it is placed inside the proper directory structure that includes project and stack configurations, with a `kcl.mod` present): + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network.network as n + +gocity: ac.AppConfiguration { + workload: wl.Service { + containers: { + "gocity": c.Container { + image = "howieyuen/gocity:latest" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +Don't worry about what `workload` or `n.Network` stand for at the moment. We will deep dive into each one of them in this upcoming documentation series. + +### Using `kusion init` + +Kusion offers a `kusion init` sub-command which initializes a new project using some pre-built templates, which saves you from the hassle of manually building the aforementioned directory structure that Kusion expects. + +There is a built-in template `single-stack-sample` in the Kusion binary that can be used offline. + +We also maintain a [kusion-templates repository](https://github.com/KusionStack/kusion-templates) that hosts a list of more comprehensive project scaffolds. You can access them via `kusion init --online` command which requires connectivity to `github.com`. + +The pre-built templates are meant to help you get off the ground quickly with some simple out-of-the-box examples. You can refer to the [QuickStart documentation](../getting-started/deliver-quickstart) for some step-by-step tutorials. + +### Using references + +The reference documentation for the `kam` package and the official Kusion Modules is located in [Reference](../reference/modules/developer-schemas/app-configuration). + +If you are using them out of the box, the reference documentation provides a comprehensive view for each schema involved, including all the attribute names and description, their types, default value if any, and whether a particular attribute is required or not. There will also be an example attached to each schema reference. + +We will also deep dive into some common examples in the upcoming sections. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/2-kcl-basics.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/2-kcl-basics.md new file mode 100644 index 00000000..996527f7 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/2-kcl-basics.md @@ -0,0 +1,144 @@ +--- +id: kcl-basics +--- + +# KCL Basics + +## Table of Content +- [Variable assignments](#variable-assignments) +- [Common built-in types](#common-built-in-types) +- [Lists and maps](#lists-and-maps) +- [Conditional statements](#conditional-statements) +- [The : and = operator](#the--and--operator) +- [Advanced KCL capabilities](#advanced-kcl-capabilities) + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +## Variable assignments + +There are two ways to initialize a variable in KCL. You can either use the `:` operator or the `=` operator. We will discuss the difference between them in [this section later](#the--and--operator). + +Here are the two ways to create a variable and initialize it: +``` +foo = "Foo" # Declare a variable named `foo` and its value is a string literal "Foo" +bar: "Bar" # Declare a variable named `bar` and its value is a string literal "Bar" +``` + +You will be able to override a variable assignment via the `=` operator. We will discuss this in depth in the [`:` and `=` operator section](#the--and--operator). + +## Common built-in types + +KCL supports `int`, `float`, `bool` and `string` as the built-in types. + +Other types are defined in the packages that are imported into the application configuration files. One such example would be the `AppConfiguration` object (or `Container`, `Probe`, `Port` object, etc) that are defined in the `kam` repository. + +## Lists and maps + +Lists are represented using the `[]` notation. +An example of lists: +``` +list0 = [1, 2, 3] +list1 = [4, 5, 6] +joined_list = list0 + list1 # [1, 2, 3, 4, 5, 6] +``` + +Maps are represented using the `{}` notation. +An example of maps: +``` +a = {"one" = 1, "two" = 2, "three" = 3} +b = {'one' = 1, 'two' = 2, 'three' = 3} +assert a == b # True +assert len(a) == 3 # True +``` + +## Conditional statements +You can also use basic control flow statements when writing the configuration file. + +An example that sets the value of `replicas` conditionally based on the value of `containers.myapp.resources.cpu`: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c + +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 if containers.myapp.resources.cpu == "500m" else 2 + } +} +``` + +For more details on KCL's control flow statements, please refer to the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#control-flow-statements). + +## The `:` and `=` operator + +You might have noticed there is a mixed usage of the `:` and `=` in the samples above. 
+
+:::info
+
+**TLDR: The recommendation is to use `:` in the common configurations, and `=` for overrides in the environment-specific configurations.**
+:::
+
+In KCL:
+- `:` represents a union-ed value assignment. In the pattern `identifier: E` or `identifier: T E`, the value of the expression `E` with optional type annotation `T` will be merged and union-ed into the element value.
+- `=` represents a value override. In the pattern `identifier = E` or `identifier = T E`, the value of the expression `E` with optional type annotation `T` will override the `identifier` attribute value.
+
+Let's take a look at an example:
+```
+# This is one configuration that will be merged.
+config: Config {
+    data.d1 = 1
+}
+# This is another configuration that will be merged.
+config: Config {
+    data.d2 = 2
+}
+```
+
+The above is equivalent to the snippet below since the two expressions for `config` get merged/union-ed into one:
+```
+config: Config {
+    data.d1 = 1
+    data.d2 = 2
+}
+```
+
+whereas using the `=` operator will result in a different outcome:
+```
+# This is the first configuration.
+config = Config {
+    data.d1 = 1
+}
+# This is the second configuration, which will override the prior one.
+config = Config {
+    data.d2 = 2
+}
+```
+
+The config above results in:
+```
+config: Config {
+    data.d2 = 2
+}
+```
+
+Please note that the `:` attribute operator represents an idempotent merge operation, and an error will be thrown when the values that need to be merged conflict with each other.
+
+```
+data0 = {id: 1} | {id: 2}  # Error: conflicting values between {'id': 2} and {'id': 1}
+data1 = {id: 1} | {id = 2}  # Ok, the value of `data1` is {"id": 2}
+```
+
+More about the `:` and `=` operators can be found in the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#config-operations).
+
+## Advanced KCL capabilities
+
+For more advanced KCL capabilities, please visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl).
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/3-base-override.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/3-base-override.md
new file mode 100644
index 00000000..42a70c3b
--- /dev/null
+++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/3-base-override.md
@@ -0,0 +1,92 @@
+---
+id: base-override
+---
+
+# Base and Override
+
+In practice, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets, be it different environments in the SDLC, or different clouds, regions or runtimes for cost, regulation, performance or disaster-recovery related reasons.
+
+In that context, we advocate for a pattern where you can leverage some Kusion and KCL features to minimize the amount of duplicate configuration, by separating the common base application configuration from the environment-specific ones.
+
+:::info
+
+The file names in the examples below don't matter as long as they are called out and appear in the correct order in the `entries` field (the field is a list) in `kcl.mod`. The files with common configurations should appear first in the list and stack-specific ones last. The latter takes precedence.
+
+The configurations also don't have to be placed into a single `.k` file. For complex projects, they can be broken down into smaller, organized `.k` files for better readability.
+::: + +Base configuration defined in `base/base.k`: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network.network as n + +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +Environment-specific configuration defined in `dev/main.k`: +``` +import kam.v1.app_configuration as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + # dev stack has different app configuration from the base + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } + } + replicas = 2 + } +} +``` + +Alternatively, you could locate a specific property (in this case below, the `Container` object) in the `AppConfiguration` object using the dot selector shorthand(such as `workload.containers.myapp` or `workload.replicas` below): +``` +import kam.v1.app_configuration as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload.replicas = 2 + workload.containers.myapp: { + # dev stack has different app configuration + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } +} +``` +This is especially useful when the application configuration is complex but the override is relatively straightforward. + +The two examples above are equivalent when overriding the base. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/4-workload.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/4-workload.md new file mode 100644 index 00000000..605a4e32 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/4-workload.md @@ -0,0 +1,325 @@ +# Workload + +The `workload` attribute in the `AppConfiguration` instance is used to describe the specification for the application workload. The application workload generally represents the computing component for the application. + +A `workload` maps to an `AppConfiguration` instance 1:1. If there are more than one workload, they should be considered different applications. + +## Table of Content +- [Import](#import) +- [Types of workloads](#types-of-workloads) +- [Configure containers](#configure-containers) + - [Application image](#application-image) + - [Resource Requirements](#resource-requirements) + - [Health Probes](#health-probes) + - [Lifecycle Hooks](#lifecycle-hooks) + - [Create Files](#create-files) + - [Customize container initialization](#customize-container-initialization) +- [Configure Replicas](#configure-replicas) +- [Differences between Service and Job](#differences-between-service-and-job) +- [Workload References](#workload-references) + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). 
+ +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import kam.v1.workload.container.probe as p +import kam.v1.workload.container.lifecycle as lc +``` + +## Types of Workloads + +There are currently two types of workloads: + +- `Service`, representing a long-running, scalable workload type that should "never" go down and respond to short-lived latency-sensitive requests. This workload type is commonly used for web applications and services that expose APIs. +- `Job`, representing batch tasks that take from a few seconds to days to complete and then stop. These are commonly used for batch processing that is less sensitive to short-term performance fluctuations. + +To instantiate a `Service`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service {} +} +``` + +To instantiate a `Job`: +``` +myapp: ac.AppConfiguration { + workload: wl.Job {} +} +``` + +Of course, the `AppConfiguration` instances above is not sufficient to describe an application. We still need to provide more details in the `workload` section. + +## Configure containers + +Kusion is built on top of cloud-native philosophies. One of which is that applications should run as loosely coupled microservices on abstract and self-contained software units, such as containers. + +The `containers` attribute in a workload instance is used to define the behavior for the containers that run application workload. The `containers` attribute is a map, from the name of the container to the `catalog.models.schema.v1.workload.container.Container` Object which includes the container configurations. + +:::info + +The name of the container is in the context of the configuration file, so you could refer to it later. It's not referring to the name of the container in the Kubernetes cluster (or any other runtime). +::: + +Everything defined in the `containers` attribute is considered an application container, as opposed to a sidecar container. Sidecar containers will be introduced in a different attribute in a future version. + +In most of the cases, only one application container is needed. Ideally, we recommend mapping an `AppConfiguration` instance to a microservice in the microservice terminology. + +We will walk through the details of configuring a container using an example of the `Service` type. + +To add an application container: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container {} + } + } +} +``` + +### Application image + +The `image` attribute in the `Container` schema specifies the application image to run. This is the only required field in the `Container` schema. + +To specify an application image: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + } + } +} +``` + +### Resource Requirements + +The `resources` attribute in the `Container` schema specifies the application resource requirements such as cpu and memory. + +You can specify an upper limit (which maps to resource limits only) or a range as the resource requirements (which maps to resource requests and limits in Kubernetes). + +To specify an upper bound (only resource limits): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # ... 
+
+            }
+        }
+    }
+}
+```
+
+To specify a range (both resource requests and limits):
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # Sets requests to cpu=250m and memory=256Mi
+                # Sets limits to cpu=500m and memory=512Mi
+                resources: {
+                    "cpu": "250m-500m"
+                    "memory": "256Mi-512Mi"
+                }
+                # ...
+            }
+        }
+    }
+}
+```
+
+### Health Probes
+
+There are three types of `Probe` defined in a `Container`:
+
+- `livenessProbe` - used to determine if the container is healthy and running
+- `readinessProbe` - used to determine if the container is ready to accept traffic
+- `startupProbe` - used to determine if the container has started properly. Liveness and readiness probes don't start until `startupProbe` succeeds. Commonly used for containers that take a while to start
+
+The probes are optional. You can only have one Probe of each kind for a given `Container`.
+
+To configure an `Http` type `readinessProbe` that probes the health via HTTP requests and an `Exec` type `livenessProbe` which executes a command:
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+                # Configure an Http type readiness probe at /healthz
+                readinessProbe: p.Probe {
+                    probeHandler: p.Http {
+                        url: "/healthz"
+                    }
+                    initialDelaySeconds: 10
+                    timeoutSeconds: 5
+                    periodSeconds: 15
+                    successThreshold: 3
+                    failureThreshold: 1
+                }
+                # Configure an Exec type liveness probe that executes probe.sh
+                livenessProbe: p.Probe {
+                    probeHandler: p.Exec {
+                        command: ["probe.sh"]
+                    }
+                    initialDelaySeconds: 10
+                }
+            }
+        }
+    }
+}
+```
+
+### Lifecycle Hooks
+
+You can also configure lifecycle hooks that trigger in response to container lifecycle events such as liveness/startup probe failure, preemption, resource contention, etc.
+
+There are two types that are currently supported:
+
+- `PreStop` - triggers before the container is terminated.
+- `PostStart` - triggers after the container is initialized.
+
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+                # Configure lifecycle hooks
+                lifecycle: lc.Lifecycle {
+                    # Configures an Exec type pre-stop hook that executes preStop.sh
+                    preStop: p.Exec {
+                        command: ["preStop.sh"]
+                    }
+                    # Configures an Http type post-start hook at /post-start
+                    postStart: p.Http {
+                        url: "/post-start"
+                    }
+                }
+            }
+        }
+    }
+}
+```
+
+### Create Files
+
+You can also create files on-demand during the container initialization.
+
+To create a custom file and mount it to `/home/admin/my-file` when the container starts:
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+                # Creates a file during container startup
+                files: {
+                    "/home/admin/my-file": c.FileSpec {
+                        content: "some file contents"
+                        mode: "0777"
+                    }
+                }
+            }
+        }
+    }
+}
+```
+
+### Customize container initialization
+
+You can also customize the container entrypoint via `command`, `args`, and `workingDir`. These should **most likely not be required**. In most cases, the entrypoint details should be baked into the application image itself.
+
+To customize the container entrypoint:
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+
+                # This command will overwrite the entrypoint set in the image Dockerfile
+                command: ["/usr/local/bin/my-init-script.sh"]
+                # Extra arguments appended to the command defined above
+                args: [
+                    "--log-dir=/home/my-app/logs"
+                    "--timeout=60s"
+                ]
+                # Run the command as defined above, in the directory "/tmp"
+                workingDir: "/tmp"
+            }
+        }
+    }
+}
+```
+
+## Configure Replicas
+
+The `replicas` field in the `workload` instance describes the number of identical copies to run at the same time. It is generally recommended to have multiple replicas in production environments to eliminate any single point of failure. In Kubernetes, this corresponds to the `spec.replicas` field in the relevant workload manifests.
+
+To configure a workload to have a replica count of 3:
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            # ...
+        }
+        replicas: 3
+        # ...
+    }
+    # ...
+}
+```
+
+## Differences between Service and Job
+
+The two types of workloads, namely `Service` and `Job`, share a majority of the attributes with some minor differences.
+
+### Exposure
+
+A `Service` usually represents a long-running, scalable workload that responds to short-lived latency-sensitive requests and never goes down. Hence, a `Service` has an additional attribute that determines how it is exposed and can be accessed. A `Job` does NOT have the option to be exposed. We will explore more in the [application networking walkthrough](networking).
+
+### Job Schedule
+
+A `Job` can be configured to run in a recurring manner. In this case, the job will have a cron-format schedule that represents its recurring schedule.
+
+To configure a job to run every hour:
+```
+myjob: ac.AppConfiguration {
+    workload: wl.Job {
+        containers: {
+            "busybox": c.Container {
+                image: "busybox:1.28"
+                # Run the following command as defined
+                command: ["/bin/sh", "-c", "echo hello"]
+            }
+        }
+        # Run every hour.
+        schedule: "0 * * * *"
+    }
+}
+```
+
+## Workload References
+
+You can find the workload references [here](../reference/modules/developer-schemas/workload/service).
+
+You can find the workload schema source [here](https://github.com/KusionStack/catalog/tree/main/models/schema/v1/workload).
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/5-networking.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/5-networking.md
new file mode 100644
index 00000000..b7d43401
--- /dev/null
+++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/5-networking.md
@@ -0,0 +1,148 @@
+---
+id: networking
+---
+
+# Application Networking
+
+In addition to configuring the application's [container specifications](workload#configure-containers), you can also configure its networking behaviors, including how to expose the application and how it can be accessed. You can specify a `network` module in the `accessories` field in `AppConfiguration` to achieve that.
+
+In future versions, this will also include ingress-based routing strategies and DNS configurations.
+
+## Import
+
+In the examples below, we are using schemas defined in the `kam` package and the `network` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview).
+
+The `import` statements needed for the following walkthrough:
+```
+import kam.v1.app_configuration as ac
+import kam.v1.workload as wl
+import network.network as n
+```
+
+The `kcl.mod` must contain a reference to the network module:
+```
+#...
+ +[dependencies] +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.1.0" } + +#... +``` + +## Private vs Public Access + +Private network access means the service can only be access from within the target cluster. + +Public access is implemented using public load balancers on the cloud. This generally requires a Kubernetes cluster that is running on the cloud with a vendor-specific service controller. + +Any ports defined default to private access unless explicitly specified. + +To expose port 80 to be accessed privately: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +To expose port 80 to be accessed publicly: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +:::info +The CSP (Cloud Service Provider) used to provide load balancers is defined by platform engineers in workspace. +::: + +## Mapping ports + +To expose a port `80` that maps to a different port `8088` on the container: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + } + ] + } + } +} +``` + +## Exposing multiple ports + +You can also expose multiple ports and configure them separately. + +To expose port 80 to be accessed publicly, and port 9099 for private access (to be scraped by Prometheus, for example): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + n.Port { + port: 9099 + } + ] + } + } +} +``` + +## Choosing protocol + +To expose a port using the `UDP` protocol: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + protocol: "UDP" + } + ] + } + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/6-database.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/6-database.md new file mode 100644 index 00000000..0bdbc36c --- /dev/null +++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/6-database.md @@ -0,0 +1,426 @@ +--- +id: databse +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Managed Databases + +You could also specify a database needed for the application. That can be achieved via a `mysql` or a `postgres` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. + +You can currently have several databases with **different database names** for an application at the same time. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `mysql` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](./1-overview.md#configuration-file-overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import mysql.mysql +import postgres.postgres +``` + +The `kcl.mod` must contain reference to the `mysql` module or `postgres` module: +``` +#... 
+ +[dependencies] +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.1.0" } +postgres = { oci = "oci://ghcr.io/kusionstack/postgres", tag = "0.1.0" } +#... +``` + +## Types of Database offerings + +As of version 0.11.0, Kusion supports the following database offerings on the cloud: +- MySQL and PostgreSQL Relational Database Service (RDS) on [AWS](https://aws.amazon.com/rds/) +- MySQL and PostgreSQL Relational Database Service (RDS) on [AliCloud](https://www.alibabacloud.com/product/databases) + +More database types on more cloud vendors will be added in the future. + +Alternatively, Kusion also supports creating a database at `localhost` for local testing needs. A local database is quicker to stand up and easier to manage. It also eliminates the need for an account and any relevant costs with the cloud providers in the case that a local testing environment is sufficient. + +:::info +You do need a local Kubernetes cluster to run the local database workloads. You can refer to [Minikube](https://minikube.sigs.k8s.io/docs/start/) or [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) to get started. +To see an end-to-end use case for standing up a local testing environment including a local database, please refer to the [Kusion Quickstart](../2-getting-started/2-deliver-quickstart.md). +::: + +## Cloud Credentials and Permissions + +Kusion provisions databases on the cloud via [terraform](https://www.terraform.io/) providers. For it to create _any_ cloud resources, it requires a set of credentials that belongs to an account that has the appropriate write access so the terraform provider can be initialized properly. + +For AWS, the environment variables needed: +``` +export AWS_ACCESS_KEY_ID="xxxxxxxxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="xxxxxxx" # replace it with your SecretKey +export AWS_REGION=us-east-1 # replace it with your region +``` + +For AliCloud, the environment variables needed: +``` +export ALICLOUD_ACCESS_KEY="xxxxxxxxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="xxxxxxxxx" # replace it with your SecretKey +export ALICLOUD_REGION=cn-hangzhou # replace it with your region +``` + +The user account that owns these credentials would need to have the proper permission policies attached to create databases and security groups. If you are using the cloud-managed policies, the policies needed to provision a database and configure firewall rules are listed below. + +For AWS: +- `AmazonVPCFullAccess` for creating and managing database firewall rules via security group +- `AmazonRDSFullAccess` for creating and managing RDS instances + +For AliCloud: +- `AliyunVPCFullAccess` for creating and managing database firewall rules via security group +- `AliyunRDSFullAccess` for creating and managing RDS instances + +Alternatively, you can use customer managed policies if the cloud provider built-in policies don't meet your needs. The list of permissions needed are in the [AmazonRDSFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonRDSFullAccess.html#AmazonRDSFullAccess-json) and [AmazonVPCFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonVPCFullAccess.html). It will most likely be a subset of the permissions in the policy documents. 
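+
+Before letting Kusion create any cloud resources, it can help to double-check which identity the exported credentials resolve to. The sketch below assumes the AWS CLI is installed and configured with the same credentials; the IAM user name is purely illustrative:
+
+```shell
+# confirm which AWS identity the exported credentials resolve to
+aws sts get-caller-identity
+
+# optionally, list the managed policies attached to that user
+aws iam list-attached-user-policies --user-name kusion-provisioner
+```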
+ +## Configure Database + +### Provision a Cloud Database + +Assuming the steps in the [Cloud Credentials and Permissions](#cloud-credentials-and-permissions) section is setup properly, you can now provision cloud databases via Kusion. + +#### AWS RDS Instance +To provision an AWS RDS instance with MySQL v8.0 or PostgreSQL v14.0, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. + + + + +```yaml +# MySQL configurations for AWS RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +# PostgreSQL configurations for AWS RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +It's highly recommended to replace `0.0.0.0/0` and closely manage the whitelist of IPs that can access the database for security purposes. The `0.0.0.0/0` in the example above or if `securityIPs` is omitted altogether will allow connections from anywhere which would typically be a security bad practice. + +The `instanceType` field determines the computation and memory capacity of the RDS instance. The `db.t3.micro` instance type in the example above represents the `db.t3` instance class with a size of `micro`. In the same `db.t3` instance family there are also `db.t3.small`, `db.t3.medium`, `db.t3.2xlarge`, etc. + +The full list of supported `instanceType` values can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Support). + +You can also adjust the storage capacity for the database instance by changing the `size` field which is storage size measured in gigabytes. The minimum is 20. More details can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#Concepts.Storage.GeneralSSD). + +#### AliCloud RDS Instance + +To provision an Alicloud RDS instance with MySQL or PostgreSQL, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. 
Note that AliCloud RDS has several additional fields such as `category`, `subnetID` and `privateRouting`: + + + + +```yaml +# MySQL configurations for Alicloud RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +# PostgreSQL configurations for Alicloud RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +We will walkthrough `subnetID` and `privateRouting` in the [Configure Network Access](#configure-network-access) section. + +The full list of supported `instanceType` values can be found in: +- [MySQL instance types(x86)](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/primary-apsaradb-rds-for-mysql-instance-types#concept-2096487) +- [PostgreSQL instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-postgresql/primary-apsaradb-rds-for-postgresql-instance-types#concept-2096578) + +### Local Database + +To deploy a local database with MySQL v8.0 or PostgreSQL v14.0: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +## Database Credentials + +There is no need to manage the database credentials manually. Kusion will automatically generate a random password, set it as the credential when creating the database, and then inject the hostname, username and password into the application runtime. + +You have the option to BYO (Bring Your Own) username for the database credential by specifying the `username` attribute in the `workspace.yaml`: +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + # ... + username: "my_username" +``` + +You **cannot** bring your own password. The password will always be managed by Kusion automatically. + +The database credentials are injected into the environment variables of the application container. 
You can access them via the following env vars: +``` +# env | grep KUSION_DB +KUSION_DB_HOST_WORDPRESS_MYSQL=wordpress.xxxxxxxx.us-east-1.rds.amazonaws.com +KUSION_DB_USERNAME_WORDPRESS_MYSQL=xxxxxxxxx +KUSION_DB_PASSWORD_WORDPRESS_MYSQL=xxxxxxxxx +``` + +:::info +More details about the environment of database credentials injected by Kusion can be found at [mysql credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) and [postgres credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/postgres.md#credentials-and-connectivity) +::: + +You can use these environment variables out of the box. Or most likely, your application might retrieve the connection details from a different set of environment variables. In that case, you can map the kusion environment variables to the ones expected by your application using the `$()` expression. + +This example below will assign the value of `KUSION_DB_HOST_WORDPRESS_MYSQL` into `WORDPRESS_DB_HOST`, `KUSION_DB_USERNAME_WORDPRESS_MYSQL` into `WORDPRESS_DB_USER`, likewise for `KUSION_DB_PASSWORD_WORDPRESS_MYSQL` and `WORDPRESS_DB_PASSWORD`: +``` +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3-apache" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + } + # ... + } + } + # ... + } + accessories: { + # ... + } +} +``` + +## Configure Network Access + +You can also optionally configure the network access to the database as part of the `AppConfiguration`. This is highly recommended because it dramatically increases the security posture of your cloud environment in the means of least privilege principle. + +The `securityIPs` field in the `Database` schema declares the list of network addresses that are allowed to access the database. The network addresses are in the [CIDR notation](https://aws.amazon.com/what-is/cidr/) and can be either a private IP range ([RFC-1918](https://datatracker.ietf.org/doc/html/rfc1918) and [RFC-6598](https://datatracker.ietf.org/doc/html/rfc6598) address) or a public one. + +If the database need to be accessed from a public location (which should most likely not be the case in a production environment), `securityIPs` need to include the public IP address of the traffic source (For instance, if the RDS database needs to be accessed from your computer). + +To configure AWS RDS to restrict network access from a VPC with a CIDR of `10.0.1.0/24` and a public IP of `103.192.227.125`: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + # ... + securityIPs: + - "10.0.1.0/24" + - "103.192.227.125/32" +``` + +Depending on the cloud provider, the default behavior of the database firewall settings may differ if omitted. + +### Subnet ID + +On AWS, you have the option to launch the RDS instance inside a specific VPC if a `subnetID` is present in the application configuration. By default, if `subnetID` is not provided, the RDS will be created in the default VPC for that account. However, the recommendation is to self-manage your VPCs to provider better isolation from a network security perspective. + +On AliCloud, the `subnetID` is required. The concept of subnet maps to VSwitch in AliCloud. 
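+
+If you self-manage your VPCs on AWS, one way to look up candidate subnet IDs is via the AWS CLI. This is only a sketch; it assumes the AWS CLI is configured, and the VPC ID below is a placeholder:
+
+```shell
+# list the subnet IDs in a self-managed VPC (replace the VPC ID with your own)
+aws ec2 describe-subnets --filters "Name=vpc-id,Values=vpc-0123456789abcdef0" --query "Subnets[].SubnetId"
+```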
+ +To place the RDS instance into a specific VPC on Alicloud: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + # ... + subnetID: "subnet-xxxxxxxxxxxxxxxx" +``` + +### Private Routing + +There is an option to enforce private routing on certain cloud providers if both the workload and the database are running on the cloud. + +On AliCloud, you can set the `privateRouting` flag to `True`. The database host generated will be a private FQDN that is only resolvable and accessible from within the AliCloud VPCs. Setting `privateRouting` flag to `True` when `type` is `aws` is a no-op. + +To enforce private routing on AliCloud: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + # ... + privateRouting: true +``` + +Kusion will then generate a private FQDN and inject it into the application runtime as the environment variable `KUSION_DB_HOST_` for the application to use. A complete list of Kusion-managed environment variables for mysql database can be found [here](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity). + +Otherwise when using the public FQDN to connect to a database from the workload, the route will depend on cloud provider's routing preference. The options are generally either: +- Travel as far as possible on the cloud provider's global backbone network, or also referred to as cold potato routing, or +- Egress as early as possible to the public Internet and re-enter the cloud provider's datacenter later, or also referred to as hot potato routing + +The prior generally has better performance but is also more expensive. + +You can find a good read on the [AWS Blog](https://aws.amazon.com/blogs/architecture/internet-routing-and-traffic-engineering/) or the [Microsoft Learn](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/routing-preference-overview). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/7-secret.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/7-secret.md new file mode 100644 index 00000000..d48879a3 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/7-secret.md @@ -0,0 +1,245 @@ +--- +id: secret +--- + +# Secrets + +Secrets are used to store sensitive data like passwords, API keys, TLS certificates, tokens, or other credentials. Kusion provides multiple secret types, and makes it easy to be consumed in containers. + +For application dependent cloud resources that are managed by Kusion, their credentials are automatically managed by Kusion (generated and injected into application runtime environment variable). You shouldn't have to manually create those. + +## Using secrets in workload + +Secrets must be defined in AppConfiguration. The values can be generated by Kusion or reference existing secrets stored in third-party vault. Secrets can be consumed in containers by referencing them through the `secret:///` URI syntax. + +### Consume secret in an environment variable + +You can consume the data in Secrets as environment variable in your container. For example the db container uses an environment variable to set the root password. 
+ +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import kam.v1.workload.secret as sec + +sampledb: ac.AppConfiguration { + workload: wl.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume db-root-password secret in environment + "ROOT_PASSWORD": "secret://db-root-password/token" + } + } + } + # Secrets used to generate token + secrets: { + "init-info": sec.Secret { + type: "token" + } + } + } +} +``` + +The example shows the secret `root-password` being consumed as an environment variable in the db container. The secret is of type token and will automatically be generated at runtime by Kusion. + +### Consume all secret keys as environment variables + +Sometimes your secret contains multiple data that need to be consumed as environment variables. The example below shows how to consume all the values in a secret as environment variables named after the keys. + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import kam.v1.workload.secret as sec + +sampledb: ac.AppConfiguration { + workload: wl.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume all init-info secret keys as environment variables + "secret://init-info": "" + } + } + } + # Secrets used to init mysql instance + secrets: { + "init-info": sec.Secret { + type: "opaque" + data: { + "ROOT_PASSWORD": "admin" + } + } + } + } +} +``` + +This will set the environment variable "ROOT_PASSWORD" to the value "admin" in the db container. + +## Types of secrets + +Kusion provides multiple types of secrets to application developers. + +1. Basic: Used to generate and/or store usernames and passwords. +2. Token: Used to generate and/or store secret strings for password. +3. Opaque: A generic secret that can store arbitrary user-defined data. +4. Certificate: Used to store a certificate and its associated key that are typically used for TLS. +5. External: Used to retrieve secret form third-party vault. + +### Basic secrets + +Basic secrets are defined in the secrets block with the type "basic". + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "auth-info": sec.Secret { + type: "basic" + data: { + "username": "admin" + "password": "******" + } + } + } + } +} +``` + +The basic secret type is typically used for basic authentication. The key names must be username and password. If one or both of the fields are defined with a non-empty string, those values will be used. If the empty string, the default value, is used Acorn will generate random values for one or both. + +### Token secrets + +Token secrets are useful for generating a password or secure string used for passwords when the user is already known or not required. + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "api-token": sec.Secret { + type: "token" + data: { + "token": "" + } + } + } + } +} +``` + +The token secret type must be defined. The `token` field in the data object is optional and if left empty Kusion will generate the token, which is 54 characters in length by default. If the `token` is defined that value will always be used. 
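+
+The generated token can then be referenced from a container environment variable with the same `secret://` URI syntax shown earlier. A sketch, where the container name, image and environment variable name are illustrative:
+
+```
+import kam.v1.app_configuration as ac
+import kam.v1.workload as wl
+import kam.v1.workload.container as c
+import kam.v1.workload.secret as sec
+
+sampleapp: ac.AppConfiguration {
+    workload: wl.Service {
+        containers: {
+            "app": c.Container {
+                image: "nginx:1.25.2"
+                env: {
+                    # Resolved by Kusion to the generated token at runtime
+                    "API_TOKEN": "secret://api-token/token"
+                }
+            }
+        }
+        # The token is generated by Kusion since no value is provided
+        secrets: {
+            "api-token": sec.Secret {
+                type: "token"
+            }
+        }
+    }
+}
+```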
+ +### Opaque secrets + +Opaque secrets have no defined structure and can have arbitrary key value pairs. + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "my-secret": sec.Secret { + type: "opaque" + } + } + } +} +``` + +### Certificate secrets + +Certificate secrets are useful for storing a certificate and its associated key. One common use for TLS Secrets is to configure encryption in transit for an Ingress, but you can also use it with other resources or directly in your workload. + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "server-cert": sec.Secret { + type: "certificate" + data: { + # Please do not put private keys in configuration files + "tls.crt": "The cert file content" + "tls.key": "The key file content" + } + } + } + } +} +``` + +### External secrets + +As a general principle, storing secrets in a plain text configuration file is highly discouraged, keeping secrets outside of Git is especially important for future-proofing, even encrypted secrets are not recommended to check into Git. The most common approach is to store secrets in a third-party vault (such as Hashicorp Vault, AWS Secrets Manager and Azure Key Vault, etc) and retrieve the secret in the runtime only. External secrets are used to retrieve sensitive data from external secret store to make it easy to be consumed in containers. + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "api-access-token": sec.Secret { + type: "external" + data: { + # Please do not put private keys in configuration files + "accessToken": "ref://api-auth-info/accessToken?version=1" + } + } + } + } +} +``` + +The value field in data object follow `ref://PATH[?version=]` URI syntax. `PATH` is the provider-specific path for the secret to be retried. Kusion provides out-of-the-box integration with `Hashicorp Vault`, `AWS Secrets Manager`, `Azure Key Vault` and `Alicloud Secrets Manager`. + +## Immutable secrets + +You can also declare a secret as immutable to prevent it from being changed accidentally. + +To declare a secret as immutable: + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.secret as sec + +sampleapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "my-secret": sec.Secret { + # ... + immutable: True + } + } + } +} +``` + +You can change a secret from mutable to immutable but not the other way around. That is because the Kubelet will stop watching secrets that are immutable. As the name suggests, you can only delete and re-create immutable secrets but you cannot change them. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/8-monitoring.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/8-monitoring.md new file mode 100644 index 00000000..4b13c058 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/8-monitoring.md @@ -0,0 +1,102 @@ +# Application Monitoring + +You could also specify the collection of monitoring requirements for the application. 
That can be achieved via a `monitoring` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. + +As of version 0.11.0, Kusion supports integration with Prometheus by managing scraping behaviors in the configuration file. + +:::info + +For the monitoring configuration to work (more specifically, consumed by Prometheus), this requires the target cluster to have installed Prometheus correctly, either as a Kubernetes operator or a server/agent. + +More about how to set up Prometheus can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus) +::: + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `monitoring` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import monitoring as m +``` + +## Workspace configurations + +In addition to the KCL configuration file, there are also workspace-level configurations that should be set first. In an ideal scenario, this step is done by the platform engineers. + +In the event that they do not exist for you or your organization, e.g. if you are an individual developer, you can either do it yourself or use the [default values](#default-values) provided by the KusionStack team. The steps to do this yourself can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus#setting-up-workspace-configs). + +:::info + +For more details on how workspaces work, please refer to the [workspace concept](../3-concepts/4-workspace.md) +::: + +By separating configurations that the developers are interested in and those that platform owners are interested in, we can reduce the cognitive complexity of the application configuration and achieve separation of concern. + +You can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. + +```yaml +modules: + kusionstack/monitoring@v0.1.0: + default: + interval: 30s + monitorType: Pod + operatorMode: true + scheme: http + timeout: 15s +``` + +## Managing Scraping Configuration +To manage scrape configuration for the application: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/metrics" + port: "web" + } + } +} +``` + +The example above will instruct the Prometheus job to scrape metrics from the `/metrics` endpoint of the application on the port named `web`. + +To instruct Prometheus to scrape from `/actuator/metrics` on port `9099` instead: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/actuator/metrics" + port: "9099" + } + } +} +``` + +Note that numbered ports only work when your Prometheus is not running as an operator. + +Neither `path` and `port` are required fields if Prometheus runs as an operator. If omitted, `path` defaults to `/metrics`, and `port` defaults to the container port or service port, depending on which resource is being monitored. If Prometheus does not run as an operator, both fields are required. 
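+
+For example, when Prometheus runs as an operator and the defaults are acceptable, the accessory can be declared without either field. A minimal sketch, assuming an application named `myapp`:
+```
+myapp: ac.AppConfiguration {
+    workload: wl.Service {
+        # ...
+    }
+    accessories: {
+        # path defaults to /metrics; port defaults to the container or service port
+        "monitoring": m.Prometheus {}
+    }
+}
+```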
+ +Scraping scheme, interval and timeout are considered platform-managed configurations and are therefore managed as part of the [workspace configurations](../user-guides/observability/prometheus#setting-up-workspace-configs). + +More details about how the Prometheus integration works can be found in the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md). + +## Default values + +If no workspace configurations are found, the default values provided by the KusionStack team are: +- Scraping interval defaults to 30 seconds +- Scraping timeout defaults to 15 seconds +- Scraping scheme defaults to http +- Defaults to NOT running as an operator + +If any of the default values does not meet your need, you can change them by [setting up the workspace configuration](../user-guides/observability/prometheus#setting-up-workspace-configs). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/9-operational-rules.md b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/9-operational-rules.md new file mode 100644 index 00000000..b64056b3 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/9-operational-rules.md @@ -0,0 +1,54 @@ +--- +id: operational-rules +--- + +# Operational Rules + +You could also specify the collection of operational rule requirements for the application. That can be achieved via a `opsrule` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. Operational rules are used as a preemptive measure to police and stop any unwanted changes. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `opsrule` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import opsrule as o +``` + +## Max Unavailable Replicas + +Currently, the `opsrule` module supports setting a `maxUnavailable` parameter, which specifies the maximum number of pods that can be rendered unavailable at any time. It can be either a fraction of the total pods for the current application or a fixed number. This operational rule is particularly helpful against unexpected changes or deletes to the workloads. It can also prevent too many workloads from going down during an application upgrade. + +More rules will be available in future versions of Kusion. + +To set `maxUnavailable` to a percentage of pods: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + # ... + } + } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + +To set `maxUnavailable` to a fixed number of pods: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... 
+ } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: 2 + } + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/_category_.json b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/_category_.json new file mode 100644 index 00000000..64d45678 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/4-configuration-walkthrough/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Configuration Walkthrough" +} diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/1-database.md b/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/1-database.md new file mode 100644 index 00000000..3e3c3249 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/1-database.md @@ -0,0 +1,298 @@ +--- +id: database +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deliver the WordPress Application with Cloud RDS + +This tutorial will demonstrate how to deploy a WordPress application with Kusion, which relies on both Kubernetes and IaaS resources provided by cloud vendors. We can learn how to declare the Relational Database Service (RDS) to provide a cloud-based database solution with Kusion for our application from this article. + +## Prerequisites + +- Install [Kusion](../../2-getting-started/1-install-kusion.md). +- Install [kubectl CLI](https://kubernetes.io/docs/tasks/tools/#kubectl) and run a [Kubernetes](https://kubernetes.io/) or [k3s](https://docs.k3s.io/quick-start) or [k3d](https://k3d.io/v5.4.4/#installation) or [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node) cluster. +- Prepare a cloud service account and create a user with at least **VPCFullAccess** and **RDSFullAccess** related permissions to use the Relational Database Service (RDS). This kind of user can be created and managed in the Identity and Access Management (IAM) console of the cloud vendor. +- The environment that executes `kusion` needs to have connectivity to terraform registry to download the terraform providers. + +Additionally, we also need to configure the obtained AccessKey and SecretKey as well as the cloud resource region as environment variables for specific cloud provider: + + + + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +export AWS_REGION=us-east-1 # replace it with your region +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +```mdx-code-block + + +``` + +```bash +export ALICLOUD_ACCESS_KEY="LTAI5txxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="nxuowIxxx" # replace it with your SecretKey +export ALICLOUD_REGION=cn-hangzhou # replace it with your region +``` + +![alicloud iam account](/img/docs/user_docs/getting-started/set-rds-access.png) + +```mdx-code-block + + +``` + +## Init Workspace + +To deploy the WordPress application with cloud rds, we first need to initiate a `Workspace` for the targeted stack (here we are using `dev`). Please copy the following example YAML file to your local `workspace.yaml`. 
+ + + + +`workspace.yaml` +```yaml +# MySQL configurations for AWS RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +`workspace.yaml` +```yaml +# MySQL configurations for Alicloud RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +If you would like to try creating the `Alicloud` RDS instance, you should replace the `[your-subnet-id]` of `modules.kusionstack/mysql@0.1.0.default.subnetID` field with the Alicloud `vSwitchID` to which the database will be provisioned in. After that, you can execute the following command line to initiate the configuration for `dev` workspace. + +```shell +kusion workspace create dev -f workspace.yaml +``` + +Since Kusion by default use the `default` workspace, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you have already created and used the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + +The `workspace.yaml` is a sample configuration file for workspace management, including `MySQL` module configs. Workspace configurations are usually declared by **Platform Engineers** and will take effect through the corresponding stack. + +:::info +More details about the configuration of Workspace can be found in [Concepts of Workspace](../../3-concepts/4-workspace.md). +::: + +## Create Project And Stack + +We can create a new project named `wordpress-rds-cloud` with the `kusion project create` command. + +```shell +# Create a new directory and navigate into it. +mkdir wordpress-rds-cloud && cd wordpress-rds-cloud + +# Create a new project with the name of the current directory. +kusion project create +``` + +After creating the new project, we can create a new stack named `dev` with the `kusion stack create` command. + +```shell +# Create a new stack with the specified name under current project directory. +kusion stack create dev +``` + +The created project and stack structure looks like below: + +```shell +tree +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +### Update And Review Configuration Codes + +The configuration codes in the created stack are basically empty, thus we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +```shell +# dev/kcl.mod +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.1.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.1.0" } +``` + +```python +# dev/main.k +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n +import mysql + +# main.k declares customized configurations for dev stacks. 
+wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image: "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +## Application Delivery + +You can complete the delivery of the WordPress application in the folder of `wordpress-cloud-rds/dev` using the following command line. Kusion will enable the watching of the application resource creation and automatic port-forwarding of the specified port (80) from local to the Kubernetes Service. + +```shell +cd dev && kusion apply --watch +``` + +:::info +During the first apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + + + + +![apply the wordpress application with aws rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![apply the wordpress application with alicloud rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + +``` + +After all the resources reconciled, we can port-forward our local port (e.g. 12345) to the WordPress frontend service port (80) in the cluster: + +```shell +kubectl port-forward -n wordpress-cloud-rds svc/wordpress-cloud-rds-dev-wordpress-private 12345:80 +``` + +![kubectl port-forward for wordpress](/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png) + +## Verify WordPress Application + +Next, we will verify the WordPress site service we just delivered, along with the creation of the RDS instance it depends on. We can start using the WordPress site by accessing the link of local-forwarded port [(http://localhost:12345)](http://localhost:12345) we just configured in the browser. + +![wordpress site page](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +In addition, we can also log in to the cloud service console page to view the RDS instance we just created. + + + + +![aws rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png) + +```mdx-code-block + + +``` + +![alicloud rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png) + +```mdx-code-block + + +``` + +## Delete WordPress Application + +You can delete the WordPress application and related RDS resources using the following command line. 
+ +```shell +kusion destroy --yes +``` + + + + +![kusion destroy wordpress with aws rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![kusion destroy wordpress with alicloud rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/2-expose-service.md b/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/2-expose-service.md new file mode 100644 index 00000000..bafb6b96 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/2-expose-service.md @@ -0,0 +1,252 @@ +--- +id: expose-service +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Expose Application Service Deployed on CSP Kubernetes + +Deploying applications on the Kubernetes provided by CSP (Cloud Service Provider) is convenient and reliable, which is adopted by many enterprises. Kusion has a good integration with CSP Kubernetes service. You can deploy your application to the Kubernetes cluster, and expose the service in a quite easy way. + +This tutorial demonstrates how to expose service of the application deployed on CSP Kubernetes. And the responsibilities of platform engineers and application developers are also clearly defined. + +## Prerequisites + +Create a Kubernetes cluster provided by CSP, and complete the corresponding configurations for `KUBECONFIG`. The following CSP Kubernetes services are supported: + +- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks) +- [Alibaba Cloud Container Service for Kubernetes (ACK)](https://www.alibabacloud.com/product/kubernetes) + +## Expose Service Publicly + +If you want the application to be accessed from outside the cluster, you should expose the service publicly. Follow the steps below, you will simply hit the goal. + +### Set up Workspace + +Create the workspace as the target where the application will be deployed to. The workspace is usually set up by **Platform Engineers**, which contains platform-standard and application-agnostic configurations. The workspace configurations are organized through a YAML file. + + + + +```yaml +modules: + kusionstack/network@0.1.0: + default: + port: + type: aws +``` + +```mdx-code-block + + +``` + +```yaml +modules: + kusionstack/network@0.1.0: + default: + port: + type: alicloud + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +```mdx-code-block + + +``` + +The YAML shown above gives an example of the workspace configuration to expose service on **EKS** and **ACK**. The block `port` contains the workspace configuration of Kusion module `network`, which has the following fields: + +- type: the CSP providing Kubernetes service, support `alicloud` and `aws` +- annotations: annotations attached to the service, should be a map +- labels: labels attached to the service, should be a map + +Then, create the workspace with the configuration file. The following command creates a workspace named `dev` with configuration file `workspace.yaml`. 
+ +```bash +kusion workspace create dev -f workspace.yaml +``` + +After that, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you already create and use the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + + +### Init Project + +After creating workspace, you should write application configuration code, which only contains simple and application-centric configurations. This step is usually accomplished by application developers. + +We can start by initializing this tutorial project with `kusion init` cmd: + +```shell +# Create a new directory and navigate into it. +mkdir nginx && cd nginx + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the directory structure can be found in [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md). +::: + +### Update And Review Configuration Codes + +The initiated configuration codes are for the demo quickstart application, we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +`dev/kcl.mod` +```shell +[package] +name = "nginx" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +`dev/main.k` +```python +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: wl.Service { + containers: { + nginx: c.Container { + image: "nginx:1.25.2" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: True + } + ] + } + } +} +``` + +The code shown above describes how to expose service publicly. Kusion use schema `Port` to describe the network configuration, the primary fields of Port are as follows: + +- port: port number to expose service +- protocol: protocol to expose service, support `TCP` and `UDP` +- public: whether to public the service + +To public the service, you should set `public` as True. Besides, schema `Service` should be used to describe the workload configuration. + +That's all what an application developer needs to configure! Next, preview and apply the configuration, the application will get deployed and exposed publicly. + +:::info +Kusion uses Load Balancer (LB) provided by the CSP to expose service publicly. 
For more detailed network configuration, please refer to [Application Networking](https://www.kusionstack.io/docs/kusion/configuration-walkthrough/networking) +::: + +:::info +During the first preview and apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + +### Preview and Apply + +Execute `kusion preview` under the stack path, you will get what will be created in the real infrastructure. The picture below gives the preview result of the example. A Namespace, Service and Deployment will be created, which meets the expectation. The service name has a suffix `public`, which shows it can be accessed publicly. + +![preview-public](/img/docs/user_docs/cloud-resources/expose-service/preview-public.png) + +Then, execute `kusion apply --yes` to do the real deploying job. Just a command and a few minutes, you have accomplished deploying application and expose it publicly. + +![apply-public](/img/docs/user_docs/cloud-resources/expose-service/apply-public.png) + +### Verify Accessibility + +In the example above, the kubernetes Namespace whose name is nginx, and a `Service` and `Deployment` under the Namespace should be created. Use `kubectl get` to check, the Service whose type is `LoadBalancer` and Deployment are created indeed. And the Service has `EXTERNAL-IP` 106.5.190.109, which means it can be accessed from outside the cluster. + +![k8s-resource-public](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png) + +Visit the `EXTERNAL-IP` via browser, the correct result is returned, which illustrates the servie getting publicly exposed successfully. + +![result-public](/img/docs/user_docs/cloud-resources/expose-service/result-public.png) + +## Expose Service Inside Cluster + +If you only need the application to be accessed inside the cluster, just configure `Public` as `False` in schema `Port`. There is no need to change the workspace, which means an application developer can easily change a service exposure range, without the involvement of platform engineers. + +```python +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: wl.Service { + ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: False + } + ] + } + } +} +``` + +Execute `kusion apply --yes`, the generated Service has suffix `private`. + +![apply-private](/img/docs/user_docs/cloud-resources/expose-service/apply-private.png) + +And the Service type is `ClusterIP`, only has `CLUSTER_IP` and no `EXTERNAL_IP`, which means it cannot get accessed from outside the cluster. + +![k8s-resource-private](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png) + +## Summary +This tutorial demonstrates how to expose service of the application deployed on the CSP Kubernetes. By platform engineers' setup of workspace, and application developers' configuration of schema `Port` of Kusion module `network`, Kusion enables you expose service simply and efficiently. 
diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/_category_.json b/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/_category_.json new file mode 100644 index 00000000..f6f2c380 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/1-cloud-resources/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Cloud Resources" +} diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/1-deploy-application.md b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/1-deploy-application.md new file mode 100644 index 00000000..ae93443b --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/1-deploy-application.md @@ -0,0 +1,281 @@ +--- +id: deploy-application +--- + +# Deploy Application + +This guide shows you how to use Kusion CLIs to complete the deployment of an application running in Kubernetes. +We call the abstraction of application operation and maintenance configuration as `AppConfiguration`, and its instance as `Application`. +It is essentially a configuration model that describes an application. The complete definition can be seen [here](../../reference/modules/developer-schemas/app-configuration). + +In production, the application generally includes minimally several k8s resources: + +- Namespace +- Deployment +- Service + +:::tip +This guide requires you to have a basic understanding of Kubernetes. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- [Service](https://kubernetes.io/docs/concepts/services-networking/service/) +::: + +## Prerequisites + +Before we start, we need to complete the following steps: + +1、Install Kusion + +We recommend using HomeBrew(Mac), Scoop(Windows), or an installation shell script to download and install Kusion. +See [Download and Install](../../getting-started/install-kusion) for more details. + +2、Running Kubernetes cluster + +There must be a running and accessible Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. +If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. + +## Initializing + +This guide is to deploy an app using Kusion, relying on the Kusion CLI and an existing Kubernetes cluster. + +### Initializing workspace configuration + +In version 0.10.0, we have introduced the new concept of [workspaces](../../concepts/workspace), which is a logical layer whose configurations represent an opinionated set of defaults, often appointed by the platform team. In most cases workspaces are represented with an "environment" in traditional SDLC terms. These workspaces provide a means to separate the concerns between the **application developers** who wish to focus on business logic, and a group of **platform engineers** who wish to standardize the applications on the platform. + +Driven by the discipline of Platform Engineering, management of the workspaces, including create/updating/deleting workspaces and their configurations should be done by dedicated platform engineers in a large software organizations to facilitate a more mature and scalable collaboration pattern. 
+ +:::tip +More on the collaboration pattern can be found in the [design doc](https://github.com/KusionStack/kusion/blob/main/docs/design/collaboration/collaboration_paradigm.md). +::: + +However, if that does NOT apply to your scenario, e.g. if you work in a smaller org without platform engineers or if you are an individual developer, we wish Kusion can still be a value tool to have when delivering an application. In this guide, we are NOT distinctively highlighting the different roles or what the best practices entails (the design doc above has all that) but rather the steps needed to get Kusion tool to work. + +As of version 0.11.0, workspace configurations in Kusion can not only be managed on the local filesystem in the form of YAML files, but the remotely-managed workspaces have been supported as well. + +To initialize the workspace configuration: + +```bash +~/playground$ touch ~/dev.yaml +~/playground$ kusion workspace create dev -f ~/dev.yaml +create workspace dev successfully +``` + +To verify the workspace has been created properly: + +``` +~/playground$ kusion workspace list +- default +- dev +~/playground$ kusion workspace show dev +{} +``` + +Note that `show` command tells us the workspace configuration is currently empty, which is expected because we created the `dev` workspace with an empty YAML file. An empty workspace configuration will suffice in some cases, where no platform configurations are needed. + +Kusion by default uses the `default` workspace, thus we need to switch to the `dev` workspace we have just created. + +```bash +~/playground$ kusion workspace switch dev +``` + +We will progressively add more workspace configurations throughout this user guide. + +### Initializing application configuration + +Now that workspaces are properly initialized, we can begin by initializing the application configuration: + +```bash +# Create a new directory and navigate into it. +mkdir simple-service && cd simple-service + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The directory structure is as follows: + +```shell +simple-service/ +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +The project directory has the following files that are automatically generated: +- `project.yaml` represents project-level configurations. +- `dev` directory stores the customized stack configuration: + - `dev/main.k` stores configurations in the `dev` stack. + - `dev/stack.yaml` stores stack-level configurations. + - `dev/kcl.mod` stores stack-level dependencies. + +In general, the `.k` files are the KCL source code that represents the application configuration, and the `.yaml` is the static configuration file that describes behavior at the project or stack level. + +:::info +See [Project](../../concepts/project/overview) and [Stack](../../concepts/stack/overview) for more details about Project and Stack. +::: + +The `kusion init` command will create a demo quickstart application, we may update the `dev/kcl.mod` and `dev/main.k` later. + +#### kcl.mod +There should be a `kcl.mod` file generated automatically under the project directory. The `kcl.mod` file describes the dependency for the current project or stack. By default, it should contain a reference to the official [`kam` repository](https://github.com/KusionStack/kam) which holds the Kusion `AppConfiguration` and related workload model definitions that fits best practices. 
You can also create your own models library and reference that. + +You can change the package name in `kcl.mod` to `simple-service`: + +`dev/kcl.mod` +```shell +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +#### main.k +The configuration file `main.k`, usually written by the application developers, declare customized configurations for a specific stack, including an `Application` instance of `AppConfiguration` model. + +You can update the `main.k` as follows: + +```python +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: wl.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +## Previewing + +At this point, the project has been completely initialized. +The configuration is written in KCL, not JSON/YAML which Kubernetes recognizes, so it needs to be built to get the final output. And we can use the `kusion preview` cmd to preview the Kubernetes resources intended to deliver. + +Enter stack dir `simple-service/dev` and preview: + +```bash +cd simple-service/dev && kusion preview +``` + +:::tip +For instructions on the kusion command line tool, execute `kusion -h`, or refer to the tool's online [documentation](../../reference/commands). +::: + +## Applying + +Preview is now completed. We can apply the configuration as the next step. In the output from `kusion preview`, you can see 3 resources: + +- a Namespace named `simple-service` +- a Deployment named `simple-service-dev-helloworld` in the `simple-service` namespace +- a Service named `simple-service-dev-helloworld-private` in the `simple-service` namespace + +Execute command: + +```bash +kusion apply +``` + +The output is similar to: + +``` + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Create +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 3 created, 0 updated, 0 deleted. +``` + +After the configuration applying successfully, you can use the `kubectl` to check the actual status of these resources. + +1、 Check Namespace + +```bash +kubectl get ns +``` + +The output is similar to: + +``` +NAME STATUS AGE +default Active 117d +simple-service Active 38s +kube-system Active 117d +... 
+``` + +2、Check Deployment + +```bash +kubectl get deploy -n simple-service +``` + +The output is similar to: + +``` +NAME READY UP-TO-DATE AVAILABLE AGE +simple-service-dev-helloworld 1/1 1 1 59s +``` + +3、Check Service + +```bash +kubectl get svc -n simple-service +``` + +The output is similar to: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +simple-service-dev-helloworld-private ClusterIP 10.98.89.104 80/TCP 79s +``` + +4、Validate app + +Using the `kubectl` tool, forward native port `30000` to the service port `80`. + +```bash +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:80 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000): + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/2-container.md b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/2-container.md new file mode 100644 index 00000000..0647e853 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/2-container.md @@ -0,0 +1,146 @@ +--- +id: container +--- + +# Configure Containers + +You can manage container-level configurations in the `AppConfiguration` model via the `containers` field (under the `workload` schema). By default, everything defined in the `containers` field will be treated as application containers. Sidecar containers will be supported in a future version of kusion. + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the last guide, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/service). 
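+
+After updating, you can optionally confirm that the defaults have taken effect before moving on:
+
+```bash
+# Print the current configuration of the dev workspace
+kusion workspace show dev
+```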
+ +## Example + +`simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: wl.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +## Apply + +Re-run steps in [Applying](deploy-application#applying), new container configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the container (in the deployment template) now has the updated attributes as defined in the container configuration: +``` +$ kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + - name: env1 + value: VALUE + - name: env2 + value: VALUE2 + image: gcr.io/google-samples/gb-frontend:v4 + imagePullPolicy: IfNotPresent + name: helloworld + readinessProbe: + failureThreshold: 3 + httpGet: + host: localhost + path: / + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 500m + memory: 512M +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/3-service.md b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/3-service.md new file mode 100644 index 00000000..4b903fec --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/3-service.md @@ -0,0 +1,139 @@ +--- +id: service +--- + +# Expose Service + +You can determine how to expose your service in the `AppConfiguration` model via the `ports` field (under the `network` accessory). The `ports` field defines a list of all the `Port`s you want to expose for the application (and their corresponding listening ports on the container, if they don't match the service ports), so that it can be consumed by other applications. + +Unless explicitly defined, each of the ports exposed is by default exposed privately as a `ClusterIP` type service. You can expose a port publicly by specifying the `public` field in the `Port` schema. At the moment, the implementation for publicly access is done via Load Balancer type service backed by cloud providers. 
Ingress will be supported in a future version of kusion. + +For the `Port` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-port) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the services to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + kusionstack/network@0.1.0: + default: + port: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/networking/network). + +## Example + +`simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: wl.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + targetPort: 80 + } + ] + } + } +} +``` + +The code above changes the service port to expose from `80` in the last guide to `8080`, but still targeting the container port `80` because that's what the application is listening on. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), new service configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Update +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld UnChanged + + +? Do you want to apply these diffs? yes +Start applying diffs ... 
+ SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Update v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip +UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip [3/3] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the Kubernetes service now has the updated attributes (mapping service port 8080 to container port 80) as defined in the `ports` configuration: + +``` +kubectl get svc -n simple-service -o yaml +... + spec: + ... + ports: + - name: simple-service-dev-helloworld-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 80 +... +``` + +Exposing service port 8080: +``` +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:8080 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000), the application should be up and running: + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/4-image-upgrade.md b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/4-image-upgrade.md new file mode 100644 index 00000000..ccee54e0 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/4-image-upgrade.md @@ -0,0 +1,78 @@ +--- +id: image-upgrade +--- + +# Upgrade Image + +You can declare the application's container image via `image` field of the `Container` schema. + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the image value in `simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.nginx: { + ... + # before: + # image = "gcr.io/google-samples/gb-frontend:v4" + # after: + image = "gcr.io/google-samples/gb-frontend:v5" + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), update image is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... 
+ SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated image (v5) as defined in the container configuration: +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/5-resource-spec.md b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/5-resource-spec.md new file mode 100644 index 00000000..1e5f208a --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/5-resource-spec.md @@ -0,0 +1,90 @@ +--- +id: resource-spec +--- + +# Configure Resource Specification + +You can manage container-level resource specification in the `AppConfiguration` model via the `resources` field (under the `Container` schema). + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the resources value in `simple-service/dev/main.k`: + +```py +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.helloworld: { + ... + # before: + # resources: { + # "cpu": "500m" + # "memory": "512M" + # } + # after: + resources: { + "cpu": "250m" + "memory": "256Mi" + } + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... 
+ SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated resources attributes (cpu:250m, memory:256Mi) as defined in the container configuration: + +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... + resources: + limits: + cpu: 250m + memory: 256Mi +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md new file mode 100644 index 00000000..de40895b --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md @@ -0,0 +1,85 @@ +--- +id: set-up-operational-rules +--- + +# Set up Operational Rules + +You can set up operational rules in the `AppConfiguration` model with the `opsrule` accessory and corresponding platform configurations in the workspace directory. The `opsrule` is the collection of operational rule requirements for the application that are used as a preemptive measure to police and stop any unwanted changes. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the opsrule to standardize the behavior of applications, you can do so by updating the `~/dev.yaml`. +Note that the platform engineers should set the default workload to [Kusion Operation CollaSet](https://github.com/KusionStack/operating) and installed the Kusion Operation controllers properly, the `opsrules` module will generate a [PodTransitionRule](https://www.kusionstack.io/docs/operating/manuals/podtransitionrule) instead of updating the `maxUnavailable` value in the deployment: + +```yaml +modules: + service: + default: + type: CollaSet + kusionstack/opsrule@0.1.0: + default: + maxUnavailable: 30% +``` + +Please note that the `maxUnavailable` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. 
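+
+If different projects in the same workspace need different rules, the workspace-level `projectSelector` override mechanism used elsewhere in workspace configurations should work for this module as well. The snippet below is only a hedged sketch: the override-group name `critical_apps` is a placeholder invented for this example, and `simple-service` simply reuses the project name from the earlier guides:
+
+```yaml
+modules:
+  kusionstack/opsrule@0.1.0:
+    default:
+      maxUnavailable: 30%
+    # illustrative override group; applies only to the projects listed in projectSelector
+    critical_apps:
+      maxUnavailable: 10%
+      projectSelector:
+      - simple-service
+```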
+ +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +## Example + +Add the `opsrule` module dependency to `kcl.mod`: + +```shell +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.1.0" } +opsrule = { oci = "oci://ghcr.io/kusionstack/opsrule", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +Add the `opsrule` snippet to the `AppConfiguration` in `simple-service/dev/main.k`: + +```py +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import opsrule + +helloworld: ac.AppConfiguration { + workload: wl.Service { + ... + } + # Configure the maxUnavailable rule + accessories: { + "opsrule": opsrule.OpsRule { + "maxUnavailable": 50% + } + } +} +``` + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +## Validation + +We can verify the application deployment strategy now has the updated attributes `maxUnavailable: 50%` in the container configuration. diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/7-job.md b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/7-job.md new file mode 100644 index 00000000..096cf34d --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/7-job.md @@ -0,0 +1,131 @@ +--- +id: job +--- + +# Schedule a Job + +The guides above provide examples on how to configure workloads of the type `wl.Service`, which is typically used for long-running web applications that should **never** go down. Alternatively, you could also schedule another kind of workload profile, namely `wl.Job` which corresponds to a one-off or recurring execution of tasks that run to completion and then stop. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for scheduling a job. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. 
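+
+For instance, with the workspace default of `replicas: 3` above, a project that sets `replicas` explicitly in its `AppConfiguration` keeps its own value, while a project that omits the field inherits the default. A minimal sketch, reusing the Service workload from the earlier guides (container details elided):
+
+```python
+import kam.v1.app_configuration as ac
+import kam.v1.workload as wl
+
+helloworld: ac.AppConfiguration {
+    workload: wl.Service {
+        ...
+        # the explicit value wins over the workspace default of 3
+        replicas: 2
+    }
+}
+```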
+ +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/job). + +## Example + +To schedule a job with cron expression, update `simple-service/dev/main.k` to the following: + +`simple-service/dev/main.k`: +```py +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c + +helloworld: ac.AppConfiguration { + workload: wl.Job { + containers: { + "busybox": c.Container { + # The target image + image: "busybox:1.28" + # Run the following command as defined + command: ["/bin/sh", "-c", "echo hello"] + } + } + # Run every minute. + schedule: "* * * * *" + } +} +``` + +The KCL snippet above schedules a job. Alternatively, if you want a one-time job without cron, simply remove the `schedule` from the configuration. + +You can find the full example in here in the [konfig repo](https://github.com/KusionStack/konfig/tree/main/example/simple-job). + +## Applying + +Re-run steps in [Applying](deploy-application#applying) and schedule the job. Your output might look like one of the following: + +If you are starting from scratch, all resources are created on the spot: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* └─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create batch/v1:CronJob:simple-service:helloworld-dev-helloworld success +Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success [2/2] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 2 created, 0 updated, 0 deleted. +``` + +If you are starting from the last guide which configures an `opsrule`, the output looks like the following which destroys the `Deployment` and `Service` and replace it with a `CronJob`: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create +* ├─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Delete +* └─ v1:Service:simple-service:simple-service-dev-helloworld-private Delete + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Delete apps/v1:Deployment:simple-service:simple-service-dev-helloworld success + SUCCESS Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success + SUCCESS Delete v1:Service:simple-service:simple-service-dev-helloworld-private success +Delete v1:Service:simple-service:simple-service-dev-helloworld-private success [4/4] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 1 created, 0 updated, 2 deleted. 
+``` + +## Validation + +We can verify the job has now been scheduled: + +```shell +$ kubectl get cronjob -n simple-service +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +simple-service-dev-helloworld * * * * * False 0 2m18s +``` + +Verify the job has been triggered after the minute mark since we scheduled it to run every minute: +```shell +$ kubectl get job -n simple-service +NAME COMPLETIONS DURATION AGE +simple-service-dev-helloworld-28415748 1/1 5s 11s +``` diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/_category_.json b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/_category_.json new file mode 100644 index 00000000..79d3c6c5 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/2-working-with-k8s/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kubernetes" +} diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/3-observability/1-prometheus.md b/docs_versioned_docs/version-v0.11/5-user-guides/3-observability/1-prometheus.md new file mode 100644 index 00000000..13905ab1 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/3-observability/1-prometheus.md @@ -0,0 +1,326 @@ +--- +id: prometheus +--- + +# Configure Monitoring Behavior With Prometheus + +This document provides the step-by-step instruction to set up monitoring for your application. + +As of today, Kusion supports the configuration of Prometheus scraping behaviors for the target application. In the future, we will add more cloud-provider-native solutions, such as AWS CloudWatch, Azure Monitor, etc. + +The user guide below is composed of the following components: + +- Namespace +- Deployment +- Service +- ServiceMonitor + +:::tip + +This guide requires you to have a basic understanding of Kubernetes and Prometheus. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/) +::: + +## Pre-requisite +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +## Setting up your own Prometheus + +There a quite a few ways to set up Prometheus in your cluster: +1. Installing a Prometheus operator +2. Installing a standalone Prometheus server +3. Installing a Prometheus agent and connect to a remote Prometheus server + +[The advice from the Prometheus team](https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-401092041) is to use the `ServiceMonitor` or `PodMonitor` CRs via the Prometheus operator to manage scrape configs going forward[2]. + +In either case, you only have to do this setup once per cluster. This doc will use a minikube cluster and Prometheus operator as an example. + +### Installing Prometheus operator[3]. +To get the example in this user guide working, all you need is a running Prometheus operator. 
You can have that installed by running: +``` +LATEST=$(curl -s https://api.github.com/repos/prometheus-operator/prometheus-operator/releases/latest | jq -cr .tag_name) +curl -sL https://github.com/prometheus-operator/prometheus-operator/releases/download/${LATEST}/bundle.yaml | kubectl create -f - +``` + +This will install all the necessary CRDs and the Prometheus operator itself in the default namespace. Wait a few minutes, you can confirm the operator is up by running: +``` +kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator -n default +``` + +### Make sure RBAC is properly set up +If you have RBAC enabled on the cluster, the following must be created for Prometheus to work properly: +``` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: default +``` + +### Configure Prometheus instance via the operator +Once all of the above is set up, you can then configure the Prometheus instance via the operator: +``` +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: prometheus +spec: + serviceAccountName: prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + resources: + requests: + memory: 400Mi +``` +This Prometheus instance above will be cluster-wide, picking up ALL the service monitors and pod monitors across ALL the namespaces. +You can adjust the requests and limits accordingly if you have a larger cluster. + +### Exposing the Prometheus portal (optional) +Once you have the managed Prometheus instance created via the Prometheus CR above, you should be able to see a service created called `prometheus-operated`: + +![prometheus-operated](/img/docs/user_docs/guides/prometheus/prometheus-operated.png) + +If you are also running on minikube, you can expose it onto your localhost via kubectl: +``` +kubectl port-forward svc/prometheus-operated 9099:9090 +``` + +You should then be able to see the Prometheus portal via `localhost:9099` in your browser: + +![prometheus-portal](/img/docs/user_docs/guides/prometheus/prometheus-portal.png) + +If you are running a non-local cluster, you can try to expose it via another way, through an ingress controller for example. + +## Setting up workspace configs + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. + +In the case of setting up Prometheus, there are a few things to set up on the workspace level: + +### Operator mode + +The `operatorMode` flag indicates to Kusion whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. 
This determines the different kinds of resources Kusion manages.
+
+To see more about the different ways to run Prometheus in a Kubernetes cluster, please refer to the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md#prometheus-installation).
+
+Most cloud vendors provide out-of-the-box monitoring solutions for workloads running in a managed Kubernetes cluster (EKS, AKS, etc.), such as AWS CloudWatch, Azure Monitor, etc. These solutions mostly involve installing an agent (CloudWatch Agent, OMS Agent, etc.) in the cluster and collecting the metrics into a centralized monitoring server. In those cases, you don't need to set `operatorMode` to `True`. It only needs to be set to `True` when you have an installation of the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) running inside the Kubernetes cluster.
+
+:::info
+
+For the differences between the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) and the [community kube-prometheus-stack helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), the details are documented [here](https://github.com/prometheus-operator/prometheus-operator#prometheus-operator-vs-kube-prometheus-vs-community-helm-chart).
+:::
+
+### Monitor types
+
+The `monitorType` flag indicates the kind of monitor Kusion will create. It only applies when `operatorMode` is set to `True`. As of version 0.10.0, Kusion provides options to scrape metrics from either the application pods or their corresponding Kubernetes services. This determines the different kinds of resources Kusion manages when Prometheus runs as an operator in the target cluster.
+
+A sample `workspace.yaml` with Prometheus settings:
+```yaml
+modules:
+  ...
+  kusionstack/monitoring@0.1.0:
+    default:
+      operatorMode: True
+      monitorType: Service
+      scheme: http
+      interval: 30s
+      timeout: 15s
+...
+```
+
+To instruct Prometheus to scrape from pod targets instead:
+```yaml
+modules:
+  ...
+  kusionstack/monitoring@0.1.0:
+    default:
+      operatorMode: True
+      monitorType: Pod
+      scheme: http
+      interval: 30s
+      timeout: 15s
+...
+```
+
+If `operatorMode` is omitted from the `workspace.yaml`, Kusion defaults it to `False`.
+
+### Overriding with projectSelector
+
+Workspace configurations contain a default settings group that applies to all projects in the workspace, along with the means to override it for specific projects using the `projectSelector` keyword.
+
+Projects whose names match those listed in a `projectSelector` use the values defined in that override group instead of the defaults. If a key is not present in the override group, the default value is used.
+
+Take a look at the sample `workspace.yaml`:
+```yaml
+modules:
+  ...
+  kusionstack/monitoring@0.1.0:
+    default:
+      operatorMode: True
+      monitorType: Pod
+      scheme: http
+      interval: 30s
+      timeout: 15s
+    low_frequency:
+      operatorMode: False
+      interval: 2m
+      projectSelector:
+      - foobar
+    high_frequency:
+      monitorType: Service
+      projectSelector:
+      - helloworld
+...
+```
+
+In the example above, the project named `foobar` matches the `low_frequency` group, so it gets `operatorMode: False` and a 2-minute scraping interval, while inheriting the 15-second timeout and `http` scheme from the defaults. The project named `helloworld` matches the `high_frequency` group, so it uses the `Service` monitor type and inherits everything else (`operatorMode: True`, 30-second interval, 15-second timeout and `http` scheme) from the defaults.
+
+The same project cannot appear in more than one `projectSelector`.
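+
+To make the merge behavior concrete, the effective monitoring settings for the two selected projects in the sample above would look roughly like this (an illustrative summary only, not actual Kusion output):
+
+```yaml
+# effective settings per project (illustrative only)
+foobar:                   # matches low_frequency
+  operatorMode: False     # overridden
+  interval: 2m            # overridden
+  monitorType: Pod        # default (not applied, since operatorMode is False)
+  scheme: http            # default
+  timeout: 15s            # default
+helloworld:               # matches high_frequency
+  monitorType: Service    # overridden
+  operatorMode: True      # default
+  interval: 30s           # default
+  scheme: http            # default
+  timeout: 15s            # default
+```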
+ +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/monitoring/prometheus). + +## Updating the workspace config + +Assuming you now have a `workspace.yaml` that looks like the following: +```yaml +modules: + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Service + scheme: http + interval: 30s + timeout: 15s +... +``` + +Update the workspace configuration by running the following command: +``` +kusion workspace update dev -f workspace.yaml +``` +Verify the workspace config is properly updated by running the command: +``` +kusion workspace show dev +``` + +## Using kusion to deploy your application with monitoring requirements + +At this point we are set up for good! Any new applications you deploy via kusion will now automatically have the monitoring-related resources created, should you declare you want it via the `monitoring` field in the `AppConfiguration` model. + +The monitoring in an AppConfiguration is declared in the `monitoring` field. See the example below for a full, deployable AppConfiguration. + +Please note we are using a new image `quay.io/brancz/prometheus-example-app` since the app itself need to expose metrics for Prometheus to scrape: + +`helloworld/dev/kcl.mod`: +``` +[package] +name = "helloworld" + +[dependencies] +monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.1.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +`helloworld/dev/main.k`: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import monitoring as m +import network.network as n + +helloworld: ac.AppConfiguration { + workload: wl.Service { + containers: { + "monitoring-sample-app": c.Container { + image: "quay.io/brancz/prometheus-example-app:v0.3.0" + } + } + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/metrics" + } + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + } +} +``` + +The KCL file above represents an application with a service type workload, exposing the port 8080, and would like Prometheus to scrape the `/metrics` endpoint every 2 minutes. + +Running `kusion apply` would show that kusion will create a `Namespace`, a `Deployment`, a `Service` and a `ServiceMonitor`: +![kusion-apply-with-monitor](/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png) + +Continue applying all resources: +![kusion-apply-success](/img/docs/user_docs/guides/prometheus/kusion-apply-success.png) + +If we want to, we can verify the service monitor has been created successfully: +![service-monitor](/img/docs/user_docs/guides/prometheus/service-monitor.png) + +In a few seconds, you should be able to see in the Prometheus portal that the service we just deployed has now been discovered and monitored by Prometheus: +![prometheus-targets](/img/docs/user_docs/guides/prometheus/prometheus-targets.png) + +You can run a few simply queries for the data that Prometheus scraped from your application: +![prometheus-simple-query](/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png) + +For more info about PromQL, you can find them [here](https://prometheus.io/docs/prometheus/latest/querying/basics/)[4]. + +## References +1. Prometheus: https://prometheus.io/docs/introduction/overview/ +2. 
Prometheus team advise: https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-446691500 +3. Prometheus operator getting started doc: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md +4. PromQL basics: https://prometheus.io/docs/prometheus/latest/querying/basics/ \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/3-observability/_category_.json b/docs_versioned_docs/version-v0.11/5-user-guides/3-observability/_category_.json new file mode 100644 index 00000000..b061ae3e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/3-observability/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Automated Observability" +} diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/4-secrets-management/1-using-cloud-secrets.md b/docs_versioned_docs/version-v0.11/5-user-guides/4-secrets-management/1-using-cloud-secrets.md new file mode 100644 index 00000000..9b52531b --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/4-secrets-management/1-using-cloud-secrets.md @@ -0,0 +1,97 @@ +--- +id: using-cloud-secrets +--- + +# Using Cloud Secrets Manager + +Applications usually store sensitive data in secrets by using centralized secrets management solutions. For example, you authenticate databases, services, and external systems with passwords, API keys, tokens, and other credentials stored in a secret store, e.g. Hashicorp Vault, AWS Secrets Manager, Azure Key Vault, etc + +Kusion provides out-of-the-box support to reference existing external secrets management solution, this tutorial introduces that how to pull the secret from AWS Secrets Manager to make it available to applications. + +## Prerequisites + +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +Additionally, you also need to configure the obtained AccessKey and SecretKey as environment variables: + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +## Setting up workspace + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. + +In the case of setting up cloud secrets manager, platform teams need to specify which secrets management solution to use and necessary information to access on the workspace level. + +A sample `workspace.yaml` with AWS Secrets Manager settings: + +``` +modules: + ... +secretStore: + provider: + aws: + region: us-east-1 + profile: The optional profile to be used to interact with AWS Secrets Manager. +... +``` + +## Update AppConfiguration + +At this point we are set up for good! Now you can declare external type of secrets via the `secrets` field in the `AppConfiguration` model to consume sensitive data stored in AWS Secrets Manager. 
+ +See the example below for a full, deployable AppConfiguration. + +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c +import kam.v1.workload.secret as sec + +gitsync: ac.AppConfiguration { + workload: wl.Service { + containers: { + "syncer": c.Container { + image: "dyrnq/git-sync" + # Run the following command as defined + command: [ + "--repo=https://github.com/KusionStack/kusion" + "--ref=HEAD" + "--root=/mnt/git" + ] + # Consume secrets in environment variables + env: { + "GIT_SYNC_USERNAME": "secret://git-auth/username" + "GIT_SYNC_PASSWORD": "secret://git-auth/password" + } + } + } + # Secrets used to retrieve secret data from AWS Secrets Manager + secrets: { + "git-auth": sec.Secret { + type: "external" + data: { + "username": "ref://git-auth-info/username" + "password": "ref://git-auth-info/password" + } + } + } + } +} +``` + +## Apply and Verify + +Run `kusion apply` command to deploy above application, then use the below command to verify if the secret got deployed: + +``` +kubectl get secret -n secretdemo +``` + +You will find `git-auth` of type Opaque automatically created and contains sensitive information pulled from AWS Secrets Manager. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/4-secrets-management/_category_.json b/docs_versioned_docs/version-v0.11/5-user-guides/4-secrets-management/_category_.json new file mode 100644 index 00000000..8990c11b --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/4-secrets-management/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Secrets Management" +} diff --git a/docs_versioned_docs/version-v0.11/5-user-guides/_category_.json b/docs_versioned_docs/version-v0.11/5-user-guides/_category_.json new file mode 100644 index 00000000..abf4c874 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/5-user-guides/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guides" +} diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/_category_.json b/docs_versioned_docs/version-v0.11/6-reference/1-commands/_category_.json new file mode 100644 index 00000000..d783ca2e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Commands" +} diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/index.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/index.md new file mode 100644 index 00000000..bf4a8741 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/index.md @@ -0,0 +1,36 @@ +# Kusion Commands + +Kusion is the Platform Orchestrator of KusionStack + +### Synopsis + +As a Platform Orchestrator, Kusion delivers user intentions to Kubernetes, Clouds and On-Premise resources. Also enables asynchronous cooperation between the development and the platform team and drives the separation of concerns. + +``` +kusion [flags] +``` + +### Options + +``` + -h, --help help for kusion + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion apply](kusion-apply.md) - Apply the operational intent of various resources to multiple runtimes +* [kusion config](kusion-config.md) - Interact with the Kusion config +* [kusion destroy](kusion-destroy.md) - Destroy resources within the stack. 
+* [kusion generate](kusion-generate.md) - Generate and print the resulting Spec resources of target Stack +* [kusion init](kusion-init.md) - Initialize the scaffolding for a demo project +* [kusion mod](kusion-mod.md) - Manage Kusion modules +* [kusion options](kusion-options.md) - Print the list of flags inherited by all commands +* [kusion preview](kusion-preview.md) - Preview a series of resource changes within the stack +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory +* [kusion version](kusion-version.md) - Print the Kusion version information for the current context +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-apply.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-apply.md new file mode 100644 index 00000000..3386884b --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-apply.md @@ -0,0 +1,64 @@ +# kusion apply + +Apply the operational intent of various resources to multiple runtimes + +### Synopsis + +Apply a series of resource changes within the stack. + + Create, update or delete resources according to the operational intent within a stack. By default, Kusion will generate an execution preview and prompt for your approval before performing any actions. You can review the preview details and make a decision to proceed with the actions or abort them. + +``` +kusion apply [flags] +``` + +### Examples + +``` + # Apply with specified work directory + kusion apply -w /path/to/workdir + + # Apply with specified arguments + kusion apply -D name=test -D age=18 + + # Skip interactive approval of preview details before applying + kusion apply --yes + + # Apply without output style and color + kusion apply --no-style=true + + # Apply with localhost port forwarding + kusion apply --port-forward=8080 +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details with interactive options (default true) + --dry-run Preview the execution effect (always successful) without actually applying the changes + -h, --help help for apply + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + --operator string Specify the operator + -o, --output string Specify the output format + --port-forward int Forward the specified port from local to service + --watch After creating/updating/deleting the requested object, watch for changes + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-get.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-get.md new file mode 100644 index 00000000..a57f71e2 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-get.md @@ -0,0 +1,37 @@ +# kusion config get + +Get a config item + +### Synopsis + +This command gets the value of a specified kusion config item, where the config item must be registered. + +``` +kusion config get +``` + +### Examples + +``` + # Get a config item + kusion config get backends.current +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-list.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-list.md new file mode 100644 index 00000000..019ec5b8 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-list.md @@ -0,0 +1,37 @@ +# kusion config list + +List all config items + +### Synopsis + +This command lists all the kusion config items and their values. + +``` +kusion config list +``` + +### Examples + +``` + # List config items + kusion config list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-set.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-set.md new file mode 100644 index 00000000..355b2af2 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-set.md @@ -0,0 +1,43 @@ +# kusion config set + +Set a config item + +### Synopsis + +This command sets the value of a specified kusion config item, where the config item must be registered, and the value must be in valid type. 
+ +``` +kusion config set +``` + +### Examples + +``` + # Set a config item with string type value + kusion config set backends.current mysql-pre + + # Set a config item with int type value + kusion config set backends.mysql-pre.configs.port 3306 + + # Set a config item with struct or map type value + kusion config set backends.mysql-pre.configs '{"dbName":"kusion","user":"kk","host":"127.0.0.1","port":3306}' +``` + +### Options + +``` + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-unset.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-unset.md new file mode 100644 index 00000000..8e308e94 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config-unset.md @@ -0,0 +1,37 @@ +# kusion config unset + +Unset a config item + +### Synopsis + +This command unsets a specified kusion config item, where the config item must be registered. + +``` +kusion config unset +``` + +### Examples + +``` + # Unset a config item + kusion config unset backends.mysql-pre.configs.port +``` + +### Options + +``` + -h, --help help for unset +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config.md new file mode 100644 index 00000000..f9f40850 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-config.md @@ -0,0 +1,34 @@ +# kusion config + +Interact with the Kusion config + +### Synopsis + +Config contains the operation of Kusion configurations. + +``` +kusion config [flags] +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion config get](kusion-config-get.md) - Get a config item +* [kusion config list](kusion-config-list.md) - List all config items +* [kusion config set](kusion-config-set.md) - Set a config item +* [kusion config unset](kusion-config-unset.md) - Unset a config item + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-destroy.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-destroy.md new file mode 100644 index 00000000..77f97606 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-destroy.md @@ -0,0 +1,46 @@ +# kusion destroy + +Destroy resources within the stack. + +### Synopsis + +Destroy resources within the stack. + + Please note that the destroy command does NOT perform resource version checks. Therefore, if someone submits an update to a resource at the same time you execute a destroy command, their update will be lost along with the rest of the resource. + +``` +kusion destroy [flags] +``` + +### Examples + +``` + # Delete resources of current stack + kusion destroy +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details after previewing it + -h, --help help for destroy + --no-style no-style sets to RawOutput mode and disables all of styling + --operator string Specify the operator + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-generate.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-generate.md new file mode 100644 index 00000000..d7a3b54f --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-generate.md @@ -0,0 +1,50 @@ +# kusion generate + +Generate and print the resulting Spec resources of target Stack + +### Synopsis + + + This command generates Spec resources with given values, then write the resulting Spec resources to specific output file or stdout. + + The nearest parent folder containing a stack.yaml file is loaded from the project in the current directory. + +``` +kusion generate [flags] +``` + +### Examples + +``` + + # Generate and write Spec resources to specific output file + kusion generate -o /tmp/spec.yaml + + # Generate spec with custom workspace + kusion generate -o /tmp/spec.yaml --workspace dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. 
+ -h, --help help for generate + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string File to write generated Spec resources to + --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2) + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-init.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-init.md new file mode 100644 index 00000000..7331dcf6 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-init.md @@ -0,0 +1,44 @@ +# kusion init + +Initialize the scaffolding for a demo project + +### Synopsis + +This command initializes the scaffolding for a demo project with the name of the current directory to help users quickly get started. + + Note that target directory needs to be an empty directory. + +``` +kusion init [flags] +``` + +### Examples + +``` + # Initialize a demo project with the name of the current directory + mkdir quickstart && cd quickstart + kusion init + + # Initialize the demo project in a different target directory + kusion init --target projects/my-demo-project +``` + +### Options + +``` + -h, --help help for init + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod-init.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod-init.md new file mode 100644 index 00000000..97179c8f --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod-init.md @@ -0,0 +1,40 @@ +# kusion mod init + +Create a kusion module along with common files and directories in the current directory + +``` +kusion mod init [MODULE NAME] [PATH] [flags] +``` + +### Examples + +``` + # Create a kusion module template in the current directory + kusion mod init my-module + + # Init a kusion module at the specified Path + kusion mod init my-module ./modules + + # Init a module from a remote git template repository + kusion mod init my-module --template https://github.com// +``` + +### Options + +``` + -h, --help help for init + --template string Initialize with specified template +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod-push.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod-push.md new file mode 100644 index 00000000..0bbd4292 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod-push.md @@ -0,0 +1,65 @@ +# kusion mod push + +Push a module to OCI registry + +### Synopsis + + + The push command packages the module as an OCI artifact and pushes it to the + OCI registry using the version as the image tag. + +``` +kusion mod push [MODULE PATH] [OCI REPOSITORY URL] +``` + +### Examples + +``` + + # Push a module of current OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --creds + + # Push a module of specific OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --os-arch==darwin/arm64 --creds + + # Push a module to an OCI Registry using a credentials in : format. + kusion mod push /path/to/my-module oci://ghcr.io/org --creds : + + # Push a release candidate without marking it as the latest stable + kusion mod push /path/to/my-module oci://ghcr.io/org --latest=false + + # Push a module with custom OCI annotations + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --annotation='org.opencontainers.image.documentation=https://app.org/docs' + + # Push and sign a module with Cosign (the cosign binary must be present in PATH) + export COSIGN_PASSWORD=password + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --sign=cosign --cosign-key=/path/to/cosign.key +``` + +### Options + +``` + -a, --annotations strings Set custom OCI annotations in '=' format. + --cosign-key string The Cosign private key for signing the module. + --creds string The credentials token for the OCI registry in or : format. + -h, --help help for push + --insecure-registry If true, allows connecting to a OCI registry without TLS or with self-signed certificates. + --latest Tags the current version as the latest stable module version. (default true) + --os-arch string The os arch of the module e.g. 'darwin/arm64', 'linux/amd64'. + --sign string Signs the module with the specified provider. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod.md new file mode 100644 index 00000000..7bc944c8 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-mod.md @@ -0,0 +1,35 @@ +# kusion mod + +Manage Kusion modules + +### Synopsis + + + Commands for managing Kusion modules. + + These commands help you manage the lifecycle of Kusion modules. 
+ +``` +kusion mod +``` + +### Options + +``` + -h, --help help for mod +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion mod init](kusion-mod-init.md) - Create a kusion module along with common files and directories in the current directory +* [kusion mod push](kusion-mod-push.md) - Push a module to OCI registry + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-options.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-options.md new file mode 100644 index 00000000..b2eb8cb5 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-options.md @@ -0,0 +1,37 @@ +# kusion options + +Print the list of flags inherited by all commands + +### Synopsis + +Print the list of flags inherited by all commands + +``` +kusion options [flags] +``` + +### Examples + +``` + # Print flags inherited by all commands + kubectl options +``` + +### Options + +``` + -h, --help help for options +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-preview.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-preview.md new file mode 100644 index 00000000..2aa7b269 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-preview.md @@ -0,0 +1,60 @@ +# kusion preview + +Preview a series of resource changes within the stack + +### Synopsis + +Preview a series of resource changes within the stack. + + Create, update or delete resources according to the intent described in the stack. By default, Kusion will generate an execution preview and present it for your approval before taking any action. + +``` +kusion preview [flags] +``` + +### Examples + +``` + # Preview with specified work directory + kusion preview -w /path/to/workdir + + # Preview with specified arguments + kusion preview -D name=test -D age=18 + + # Preview with ignored fields + kusion preview --ignore-fields="metadata.generation,metadata.managedFields + + # Preview with json format result + kusion preview -o json + + # Preview without output style and color + kusion preview --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details with interactive options (default true) + -h, --help help for preview + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + --operator string Specify the operator + -o, --output string Specify the output format + -w, --workdir string The work directory to run Kusion CLI. 
+ --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-project-create.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-project-create.md new file mode 100644 index 00000000..aa9c8674 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-project-create.md @@ -0,0 +1,44 @@ +# kusion project create + +Create a new project + +### Synopsis + +This command creates a new project.yaml file under the target directory which by default is the current working directory. + + Note that the target directory needs to be an empty directory. + +``` +kusion project create +``` + +### Examples + +``` + # Create a new project with the name of the current working directory + mkdir my-project && cd my-project + kusion project create + + # Create a new project in a specified target directory + kusion project create --target /dir/to/projects/my-project +``` + +### Options + +``` + -h, --help help for create + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-project.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-project.md new file mode 100644 index 00000000..9c392998 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-project.md @@ -0,0 +1,33 @@ +# kusion project + +Project is a folder that contains a project.yaml file and is linked to a Git repository + +### Synopsis + +Project in Kusion is defined as any folder that contains a project.yaml file and is linked to a Git repository. + + Project organizes logical configurations for internal components to orchestrate the application and assembles them to suit different roles, such as developers and platform engineers. + +``` +kusion project [flags] +``` + +### Options + +``` + -h, --help help for project +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion project create](kusion-project-create.md) - Create a new project + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-stack-create.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-stack-create.md new file mode 100644 index 00000000..2ad22c2b --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-stack-create.md @@ -0,0 +1,49 @@ +# kusion stack create + +Create a new stack + +### Synopsis + +This command creates a new stack under the target directory which by default is the current working directory. + + The stack folder to be created contains 'stack.yaml', 'kcl.mod' and 'main.k' with the specified values. + + Note that the target directory needs to be a valid project directory with project.yaml file + +``` +kusion stack create +``` + +### Examples + +``` + # Create a new stack at current project directory + kusion stack create dev + + # Create a new stack in a specified target project directory + kusion stack create dev --target /dir/to/projects/my-project + + # Create a new stack copied from the referenced stack under the target project directory + kusion stack create prod --copy-from dev +``` + +### Options + +``` + --copy-from string specify the referenced stack path to copy from + -h, --help help for create + -t, --target string specify the target project directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-stack.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-stack.md new file mode 100644 index 00000000..181e3be6 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-stack.md @@ -0,0 +1,33 @@ +# kusion stack + +Stack is a folder that contains a stack.yaml file within the corresponding project directory + +### Synopsis + +Stack in Kusion is defined as any folder that contains a stack.yaml file within the corresponding project directory. + + A stack provides a mechanism to isolate multiple deployments of the same application, serving with the target workspace to which an application will be deployed. + +``` +kusion stack [flags] +``` + +### Options + +``` + -h, --help help for stack +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion stack create](kusion-stack-create.md) - Create a new stack + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-version.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-version.md new file mode 100644 index 00000000..a204b7d7 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-version.md @@ -0,0 +1,38 @@ +# kusion version + +Print the Kusion version information for the current context + +### Synopsis + +Print the Kusion version information for the current context + +``` +kusion version [flags] +``` + +### Examples + +``` + # Print the Kusion version + kusion version +``` + +### Options + +``` + -h, --help help for version + -o, --output string Output format. Only json format is supported for now +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-create.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-create.md new file mode 100644 index 00000000..2be4c7e9 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-create.md @@ -0,0 +1,46 @@ +# kusion workspace create + +Create a new workspace + +### Synopsis + +This command creates a workspace with specified name and configuration file, where the file must be in the YAML format. + +``` +kusion workspace create +``` + +### Examples + +``` + # Create a workspace + kusion workspace create dev -f dev.yaml + + # Create a workspace and set as current + kusion workspace create dev -f dev.yaml --current + + # Create a workspace in a specified backend + kusion workspace create prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-delete.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-delete.md new file mode 100644 index 00000000..d521a074 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-delete.md @@ -0,0 +1,44 @@ +# kusion workspace delete + +Delete a workspace + +### Synopsis + +This command deletes the current or a specified workspace. + +``` +kusion workspace delete +``` + +### Examples + +``` + # Delete the current workspace + kusion workspace delete + + # Delete a specified workspace + kusion workspace delete dev + + # Delete a specified workspace in a specified backend + kusion workspace delete prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-list.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-list.md new file mode 100644 index 00000000..4e958d3e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-list.md @@ -0,0 +1,41 @@ +# kusion workspace list + +List all workspace names + +### Synopsis + +This command list the names of all workspaces. + +``` +kusion workspace list +``` + +### Examples + +``` + # List all workspace names + kusion workspace list + + # List all workspace names in a specified backend + kusion workspace list --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-show.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-show.md new file mode 100644 index 00000000..d1c9e0e8 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-show.md @@ -0,0 +1,44 @@ +# kusion workspace show + +Show a workspace configuration + +### Synopsis + +This command gets the current or a specified workspace configuration. 
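+ +A workspace configuration is a plain YAML file of module defaults managed by platform engineers. As a rough sketch, assuming a workspace that only tunes the mysql module (the module name, version and values below are illustrative; see the workspace configs reference for the actual options), the configuration being shown would resemble: + +```yaml +# Hypothetical workspace configuration (e.g. dev.yaml) +modules: +  kusionstack@mysql@0.1.0: +    default: +      cloud: aws +      size: 20 +```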
+ +``` +kusion workspace show +``` + +### Examples + +``` + # Show current workspace configuration + kusion workspace show + + # Show a specified workspace configuration + kusion workspace show dev + + # Show a specified workspace in a specified backend + kusion workspace show prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for show +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-switch.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-switch.md new file mode 100644 index 00000000..2a2cf389 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-switch.md @@ -0,0 +1,41 @@ +# kusion workspace switch + +Switch the current workspace + +### Synopsis + +This command switches the workspace, where the workspace must be created. + +``` +kusion workspace switch +``` + +### Examples + +``` + # Switch the current workspace + kusion workspace switch dev + + # Switch the current workspace in a specified backend + kusion workspace switch prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for switch +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-update.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-update.md new file mode 100644 index 00000000..7789c006 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace-update.md @@ -0,0 +1,46 @@ +# kusion workspace update + +Update a workspace configuration + +### Synopsis + +This command updates a workspace configuration with specified configuration file, where the file must be in the YAML format. + +``` +kusion workspace update +``` + +### Examples + +``` + # Update the current workspace + kusion workspace update -f dev.yaml + + # Update a specified workspace and set as current + kusion workspace update dev -f dev.yaml --current + + # Update a specified workspace in a specified backend + kusion workspace update prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace.md b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace.md new file mode 100644 index 00000000..a744f587 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/1-commands/kusion-workspace.md @@ -0,0 +1,38 @@ +# kusion workspace + +Workspace is a logical concept representing a target that stacks will be deployed to + +### Synopsis + +Workspace is a logical concept representing a target that stacks will be deployed to. + + Workspace is managed by platform engineers, which contains a set of configurations that application developers do not want or should not concern, and is reused by multiple stacks belonging to different projects. + +``` +kusion workspace [flags] +``` + +### Options + +``` + -h, --help help for workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion workspace create](kusion-workspace-create.md) - Create a new workspace +* [kusion workspace delete](kusion-workspace-delete.md) - Delete a workspace +* [kusion workspace list](kusion-workspace-list.md) - List all workspace names +* [kusion workspace show](kusion-workspace-show.md) - Show a workspace configuration +* [kusion workspace switch](kusion-workspace-switch.md) - Switch the current workspace +* [kusion workspace update](kusion-workspace-update.md) - Update a workspace configuration + +###### Auto generated by spf13/cobra on 29-Apr-2024 diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/_category_.json b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/_category_.json new file mode 100644 index 00000000..0df3bade --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Schemas" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/app-configuration.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/app-configuration.md new file mode 100644 index 00000000..447e5442 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/app-configuration.md @@ -0,0 +1,35 @@ +# appconfiguration + +## Schema AppConfiguration + +AppConfiguration is a developer-centric definition that describes how to run an Application.
This application model builds upon a decade of experience at AntGroup running a super-large-scale
internal developer platform, combined with best-of-breed ideas and practices from the community. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**accessories**|{str:any}|Accessories defines a collection of accessories that will be attached to the workload.|{}| +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to resources.|{}| +|**labels**|{str:str}|Labels can be used to attach arbitrary metadata as key-value pairs to resources.|{}| +|**workload** `required`|[wl.Service](workload/service#schema-service) \| [wl.Job](workload/job#schema-job) |Workload defines how to run your application code. Currently supported workload profile
includes Service and Job.|N/A| + +### Examples +```python +# Instantiate an App with a long-running service and its image is "nginx:v1" + +import kam as ac +import kam.workload as wl +import kam.workload.container as c + +helloworld : ac.AppConfiguration { + workload: wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/database/mysql.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/database/mysql.md new file mode 100644 index 00000000..8f6135bb --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/database/mysql.md @@ -0,0 +1,39 @@ +# mysql + +## Schema MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider
managed mysql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" | "cloud"|Type defines whether the mysql database is deployed locally or provided by
cloud vendor.|| +|**version** `required`|str|Version defines the mysql version to use.|| + +### Examples +```python +# Instantiate a local mysql database with version of 5.7. + +import mysql + +accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of mysql](../../2-workspace-configs/database/mysql.md), and Kusion will automatically concatenate the ``, ``, `` and `mysql` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`. diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/database/postgres.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/database/postgres.md new file mode 100644 index 00000000..ad8cbb7e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/database/postgres.md @@ -0,0 +1,39 @@ +# postgres + +## Schema PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider
managed postgresql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" | "cloud"|Type defines whether the postgresql database is deployed locally or provided by
cloud vendor.|| +|**version** `required`|str|Version defines the postgres version to use.|| + +### Examples +```python +#Instantiate a local postgresql database with image version of 14.0. + +import postgres as postgres + +accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of postgres](../../2-workspace-configs/database/postgres.md), and Kusion will automatically concatenate the ``, ``, `` and `postgres` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`. diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/common.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/common.md new file mode 100644 index 00000000..8b649196 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/common.md @@ -0,0 +1,17 @@ +# common + +## Schema WorkloadBase + +WorkloadBase defines set of attributes shared by different workload profile, e.g Service
and Job. You can inherit this Schema to reuse these common attributes. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[s.Secret](#schema-secret)}|Secrets can be used to store a small amount of sensitive data e.g. password, token.|| + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/container.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/container.md new file mode 100644 index 00000000..ce170fc6 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/container.md @@ -0,0 +1,63 @@ +# container + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes to mount into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md new file mode 100644 index 00000000..91123526 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md @@ -0,0 +1,29 @@ +# lifecycle + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| + +### Examples +```python +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md new file mode 100644 index 00000000..64d709cd --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md @@ -0,0 +1,92 @@ +# probe + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) | [Http](#http) | [Tcp](#tcp)|The action taken to determine the liveness or health of a container|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| + +### Examples +```python +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| + +### Examples +```python +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| + +### Examples +```python +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| + +### Examples +```python +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md new file mode 100644 index 00000000..1f13bb85 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md @@ -0,0 +1,29 @@ +# secret + +## Schema Secret + +Secrets are used to provide data that is considered sensitive like passwords, API keys,
TLS certificates, tokens or other credentials. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md new file mode 100644 index 00000000..bf2e551e --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md @@ -0,0 +1,24 @@ +# prometheus + +## Schema Prometheus + +Prometheus can be used to define monitoring requirements + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**path**|str|The path to scrape metrics from.|"/metrics"| +|**port**|str|The port to scrape metrics from. When using Prometheus operator, this needs to be the port NAME. Otherwise, this can be a port name or a number.|container ports when scraping pod (monitorType is pod) and service port when scraping service (monitorType is service)| + +### Examples +```python +import monitoring as m + +"monitoring": m.Prometheus { + path: "/metrics" + port: "web" +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/network/network.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/network/network.md new file mode 100644 index 00000000..daa33121 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/network/network.md @@ -0,0 +1,51 @@ +# network + +## Schema Network + +Network defines the exposed port of Service, which can be used to describe how the Service
get accessed. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**ports**|[[Port](#schema-port)]|The list of ports which the Workload should get exposed.|| + +### Examples +```python +import network as n + +"network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] +} +``` + +## Schema Port + +Port defines the exposed port of Workload, which can be used to describe how the Workload get accessed. + +| name | type | description | default value | +| --- | --- | --- | --- | +|**port** `required`|int|The exposed port of the Workload.|80| +|**protocol** `required`|"TCP" | "UDP"|The protocol to access the port.|"TCP"| +|**public** `required`|bool|Public defines whether the port can be accessed through Internet.|False| +|**targetPort**|int|The backend container port. If empty, set it the same as the port.|| + +### Examples + +```python +import network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md new file mode 100644 index 00000000..26ee44b7 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md @@ -0,0 +1,35 @@ +# opsrule + +## Schema OpsRule + +OpsRule describes operation rules for various Day-2 Operations. Once declared, these
operation rules will be checked before any Day-2 operations. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**maxUnavailable**|int | str|The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|"25%"| + +### Examples +```python +import opsrule as o +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c + +helloworld : ac.AppConfiguration { + workload: wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + } + accessories: { + "opsrule" : o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/workload/job.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/workload/job.md new file mode 100644 index 00000000..52194488 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/workload/job.md @@ -0,0 +1,251 @@ +# job + +## Schemas +- [Job](#schema-job) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Job + +Job is a kind of workload profile that describes how to run your application code. This
is typically used for tasks that take from a few seconds to a few days to complete. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:[Container](../internal/container#schema-container)}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**schedule** `required`|str|The scheduling strategy in Cron format. More info: https://en.wikipedia.org/wiki/Cron.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store a small amount of sensitive data e.g. password, token.|| + +### Examples +```python +# Instantiate a job with a busybox image that runs every hour + +import kam.workload as wl +import kam.workload.container as c + +echoJob : wl.Job { + containers: { + "busybox": c.Container{ + image: "busybox:1.28" + command: ["/bin/sh", "-c", "echo hello"] + } + } + schedule: "0 * * * *" +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes to mount into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) | [Http](#http) | [Tcp](#tcp)|The action taken to determine the liveness or health of a container|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/workload/service.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/workload/service.md new file mode 100644 index 00000000..bb0a8eb4 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/1-developer-schemas/workload/service.md @@ -0,0 +1,248 @@ +# service + +## Schemas +- [Service](#schema-service) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Service + +Service is a kind of workload profile that describes how to run your application code. This
is typically used for long-running web applications that should "never" go down, and handle
short-lived latency-sensitive web requests, or events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store a small amount of sensitive data e.g. password, token.|| + +### Examples +```python +# Instantiate a long-running service whose image is "nginx:v1" + +import kam.workload as wl +import kam.workload.container as c + +nginxSvc : wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes to mount into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) | [Http](#http) | [Tcp](#tcp)|The action taken to determine the liveness or health of a container|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/_category_.json b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/_category_.json new file mode 100644 index 00000000..81444988 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Workspace Configs" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/database/mysql.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/database/mysql.md new file mode 100644 index 00000000..85f36f27 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/database/mysql.md @@ -0,0 +1,46 @@ +# mysql + +## Module MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider managed mysql database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the mysql database. |str|"root"|optional| +|**category**
Category specifies the edition of the mysql instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the mysql instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the host address of the cloud mysql instance for the workload to connect with is via public network or private network of the cloud vendor. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the mysql instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud mysql instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# MySQL workspace configs for AWS RDS +modules: + kusionstack@mysql@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "my-mysql" +``` + +```yaml +# MySQL workspace configs for Alicloud RDS +modules: + kusionstack@mysql@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "my-mysql" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/database/postgres.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/database/postgres.md new file mode 100644 index 00000000..3d0ea4bc --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/database/postgres.md @@ -0,0 +1,49 @@ +# postgres + +## Module PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider managed postgres database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the postgres database. |str|"root"|optional| +|**category**
Category specifies the edition of the postgres instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the postgres instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the host address of the cloud postgres instance that the workload connects to is on the cloud vendor's private network or the public network. |bool|true|optional| +|**size**<br />
Size specifies the allocated storage size of the postgres instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud postgres instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# PostgreSQL workspace configs for AWS RDS +modules: + kusionstack@postgres@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` + +```yaml +# PostgreSQL workspace configs for Alicloud RDS +modules: + kusionstack@postgres@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md new file mode 100644 index 00000000..f55b9c3f --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md @@ -0,0 +1,40 @@ +# monitoring + +`monitoring` can be used to define workspace-level monitoring configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**operatorMode**
Whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages.|true \| false|false|optional| +|**monitorType**
The kind of monitor to create. It only applies when operatorMode is set to True.|"Service" \| "Pod"|"Service"|optional| +|**interval**
The time interval at which Prometheus scrapes metrics data. Only applicable when operator mode is set to true.<br />
When operator mode is set to false, the scraping interval can only be set in the scraping job configuration, which Kusion does not have permission to manage directly.|str|30s|optional| +|**timeout**<br />
The timeout when Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping timeout can only be set in the scraping job configuration, which Kusion does not have permission to manage directly.|str|15s|optional| +|**scheme**<br />
The scheme to scrape metrics from. Possible values are http and https.|"http" \| "https"|http|optional| + +### Examples +```yaml +modules: + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + timeout: 1m + projectSelector: + - foo + - bar + high_frequency: + monitorType: Service + interval: 10s + timeout: 5s + projectSelector: + - helloworld + - wordpress + - prometheus-sample-app +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/networking/network.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/networking/network.md new file mode 100644 index 00000000..027289ba --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/networking/network.md @@ -0,0 +1,24 @@ +# network + +`network` can be used to define workspace-level networking configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
The specific cloud vendor that provides the load balancer.| "alicloud" \| "aws"|Undefined|**required**| +| **labels**<br />
The attached labels of the port.|{str:str}|Undefined|optional| +| **annotations**
The attached annotations of the port.|{str:str}|Undefined|optional| + +### Examples + +```yaml +modules: + kusionstack/network@0.1.0: + default: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md new file mode 100644 index 00000000..d5b98aa9 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md @@ -0,0 +1,19 @@ +# opsrule + +`opsrule` can be used to define workspace-level operational rule configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**maxUnavailable**
The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|int \| str|Undefined|optional| + + +### Examples + +```yaml +modules: + kusionstack/opsrule@0.1.0: + default: + maxUnavailable: "40%" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/workload/job.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/workload/job.md new file mode 100644 index 00000000..67a56155 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/workload/job.md @@ -0,0 +1,23 @@ +# job + +`job` can be used to define workspace-level job configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**<br />
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | + +### Examples +```yaml +modules: + job: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/workload/service.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/workload/service.md new file mode 100644 index 00000000..fbefe8b8 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/2-workspace-configs/workload/service.md @@ -0,0 +1,25 @@ +# service + +`service` can be used to define workspace-level service configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**<br />
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | +| **type**
Type represents the type of workload used by this Service. Currently, it supports several
types, including Deployment and CollaSet. |"Deployment" \| "CollaSet"| Deployment |**required**| + +### Examples +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/3-naming-conventions.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/3-naming-conventions.md new file mode 100644 index 00000000..ab7f668c --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/3-naming-conventions.md @@ -0,0 +1,34 @@ +--- +id: naming-conventions +sidebar_label: Resource Naming Conventions +--- + +# Resource Naming Conventions + +Kusion will automatically create Kubernetes or Terraform resources for the applications, many of which do not require users' awareness. This document will introduce the naming conventions for these related resources. + +## Kubernetes Resources + +Kusion adheres to specific rules when generating the Kubernetes resources for users' applications. The table below lists some common Kubernetes resource naming conventions. Note that `Namespace` can now be specified by users. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| Namespace | `` | v1:Namespace:wordpress-local-db | +| Deployment | ``-``-`` | apps/v1:Deployment:wordpress-local-db:wordpress-local-db-dev-wordpress | +| CronJob | ``-``-`` | batch/v1:CronJob:helloworld:helloworld-dev-helloworld | +| Service | ``-``-``-` or ` | v1:Service:helloworld:helloworld-dev-helloworld-public | + +## Terraform Resources + +Similarly, Kusion also adheres to specific naming conventions when generating the Terraform Resources. Some common resources are listed below. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| random_password | ``-`` | hashicorp:random:random_password:wordpress-db-mysql | +| aws_security_group | ``-`` | hashicorp:aws:aws_security_group:wordpress-db-mysql | +| aws_db_instance | `` | hashicorp:aws:aws_db_instance:wordpress-db | +| alicloud_db_instance | `` | aliyun:alicloud:alicloud_db_instance:wordpress-db | +| alicloud_db_connection | `` | aliyun:alicloud:alicloud_db_connection:wordpress | +| alicloud_rds_account | `` | aliyun:alicloud:alicloud_rds_account:wordpress | + +The `` is composed of two parts, one of which is the `key` of database declared in `AppConfiguration` and the other is the `suffix` declared in `workspace` configuration. Kusion will concatenate the database key and suffix, convert them to uppercase, and replace `-` with `_`. And the `` supported now includes `mysql` and `postgres`. diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/_category_.json b/docs_versioned_docs/version-v0.11/6-reference/2-modules/_category_.json new file mode 100644 index 00000000..4dadaa75 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Modules" +} diff --git a/docs_versioned_docs/version-v0.11/6-reference/2-modules/index.md b/docs_versioned_docs/version-v0.11/6-reference/2-modules/index.md new file mode 100644 index 00000000..cb0d24a9 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/2-modules/index.md @@ -0,0 +1,45 @@ +# Kusion Modules + +KusionStack presets application configuration models described by KCL, where the model is called **Kusion Model**. 
The GitHub repository [KusionStack/catalog](https://github.com/KusionStack/catalog) is used to store these models, which is known as **Kusion Model Library**. + +The original intention of designing Kusion Model is to enhance the efficiency and improve the experience of YAML users. Through the unified application model defined by code, abstract and encapsulate complex configuration items, omit repetitive and derivable configurations, and supplement with necessary verification logic. Only the necessary attributes get exposed, users get an out-of-the-box, easy-to-understand configuration interface, which reduces the difficulty and improves the reliability of the configuration work. + +Kusion Model Library currently provides the Kusion Model `AppConfiguration`. The design of `AppConfiguration` is developer-centric, based on Ant Group's decades of practice in building and managing hyperscale IDP (Internal Developer Platform), and the best practice of community. `AppConfiguration` describes the full lifecycle of an application. + +A simple example of using `AppConfiguration` to describe an application is as follows: + +```bash +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + "wordpress": c.Container { + image: "wordpress:latest" + env: { + "WORDPRESS_DB_HOST": "secret://wordpress-db/hostAddress" + "WORDPRESS_DB_PASSWORD": "secret://wordpress-db/password" + } + resources: { + "cpu": "1" + "memory": "2Gi" + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "5.7" + size: 20 + instanceType: "mysql.n2.serverless.1c" + category: "serverless_basic" + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/6-reference/3-roadmap.md b/docs_versioned_docs/version-v0.11/6-reference/3-roadmap.md new file mode 100644 index 00000000..7a1e4565 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/3-roadmap.md @@ -0,0 +1,15 @@ +# Roadmap + +For a finer-grained view into our roadmap and what is being worked on for a release, please refer to the [GitHub Issue Tracker](https://github.com/KusionStack/kusion/issues) + +## Resource Ecosystem +We plan to expand the range of resource types that our platform can handle. This includes not only traditional cloud IaaS resources, but also popular cloud-native products such as Prometheus, istio and Argo. By supporting a wider variety of resources, we aim to address the heterogeneous needs of modern applications and allow users to harness the full power of the cloud-native technologies. + +## App Progressive Rollout +One of the key challenges in delivering applications at scale is to balance the need for speed with the need for reliability. To help our users achieve this balance, we will introduce progressive rollout strategies, such as canary release, rolling release, and percentage release. These techniques enable users to test new features or versions on a small subset of their users or infrastructure before rolling them out to the entire system. By doing so, users can minimize the risk of downtime or errors caused by untested changes. + +## Custom Pipelines +Thie current workflow of KusionStack is `write`,`preview` and `apply`, but to handle more complex deployments we need to empower users to customize the deployment pipelines to fit their specific workflows and requirements. This includes the ability to define custom stages, add or remove steps, and integrate with third-party tools. 
With customizable pipelines, users can streamline their deployment process, automate repetitive tasks, and satisfy their own needs by themselves. + +## Runtime Plugin +We have already supported IaaS cloud resources and Kubernetes resources, but we need a more flexible mechanism to support a broader range of on-premise infrastructure. By supporting a diverse set of infrastructures, we can help users avoid vendor lock-in, optimize their resource usage, and scale their applications across different regions and geographies. diff --git a/docs_versioned_docs/version-v0.11/6-reference/_category_.json b/docs_versioned_docs/version-v0.11/6-reference/_category_.json new file mode 100644 index 00000000..a3b4dd92 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/6-reference/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Reference" +} diff --git a/docs_versioned_docs/version-v0.11/7-faq/1-install-error.md b/docs_versioned_docs/version-v0.11/7-faq/1-install-error.md new file mode 100644 index 00000000..a0fde76a --- /dev/null +++ b/docs_versioned_docs/version-v0.11/7-faq/1-install-error.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +--- + +# Installation + +## 1. Could not find `libintl.dylib` + +This problem is that some tools depends on the `Gettext` library, but macOS does not have this library by default. You can try to solve it in the following ways: + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. `brew install gettext` +3. Make sure `libintl.8.dylib` exists in `/usr/local/opt/gettext/lib` directory +4. If brew is installed in another directory, the library can be created by copying it to the corresponding directory + +## 2. macOS system SSL related errors + +Openssl dylib library not found or SSL module is not available problem + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. Install openssl (version 1.1) via brew + +``` +brew install openssl@1.1 +``` diff --git a/docs_versioned_docs/version-v0.11/7-faq/2-kcl.md b/docs_versioned_docs/version-v0.11/7-faq/2-kcl.md new file mode 100644 index 00000000..596aa881 --- /dev/null +++ b/docs_versioned_docs/version-v0.11/7-faq/2-kcl.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 2 +--- + +# KCL + +Visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl) for more documents. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.11/7-faq/_category_.json b/docs_versioned_docs/version-v0.11/7-faq/_category_.json new file mode 100644 index 00000000..7c4b229f --- /dev/null +++ b/docs_versioned_docs/version-v0.11/7-faq/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "FAQ" +} diff --git a/docs_versioned_docs/version-v0.12/1-what-is-kusion/1-overview.md b/docs_versioned_docs/version-v0.12/1-what-is-kusion/1-overview.md new file mode 100644 index 00000000..bbbc5fbb --- /dev/null +++ b/docs_versioned_docs/version-v0.12/1-what-is-kusion/1-overview.md @@ -0,0 +1,62 @@ +--- +id: overview +title: Overview +slug: / +--- + +# Overview + +Welcome to Kusion! This introduction section covers what Kusion is, the Kusion workflow, and how Kusion compares to other software. If you just want to dive into using Kusion, feel free to skip ahead to the [Getting Started](getting-started/install-kusion) section. + +## What is Kusion? + +Kusion is an intent-driven [Platform Orchestrator](https://internaldeveloperplatform.org/platform-orchestrators/), which sits at the core of an [Internal Developer Platform (IDP)](https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/). With Kusion you can enable app-centric development, your developers only need to write a single application specification - [AppConfiguration](https://www.kusionstack.io/docs/concepts/app-configuration). [AppConfiguration](https://www.kusionstack.io/docs/concepts/app-configuration) defines the workload and all resource dependencies without needing to supply environment-specific values, Kusion ensures it provides everything needed for the application to run. + +Kusion helps app developers who are responsible for creating applications and the platform engineers responsible for maintaining the infrastructure the applications run on. These roles may overlap or align differently in your organization, but Kusion is intended to ease the workload for any practitioner responsible for either set of tasks. + +![arch](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/overview.jpg) + + +## How does Kusion work? + +As a Platform Orchestrator, Kusion enables you to address challenges often associated with Day 0 and Day 1. Both platform engineers and application engineers can benefit from Kusion. + +There are two key workflows for Kusion: + +1. **Day 0 - Set up the modules and workspaces:** Platform engineers create shared modules for deploying applications and their underlying infrastructure, and workspace definitions for target landing zone. These standardized, shared modules codify the requirements from stakeholders across the organization including security, compliance, and finance. + + Kusion modules abstract the complexity of underlying infrastructure tooling, enabling app developers to deploy their applications using a self-service model. + +
+ + ![workflow](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/platform_workflow.jpg) +
+ +2. **Day 1 - Set up the application:** Application developers leverage the workspaces and modules created by the platform engineers to deploy applications and their supporting infrastructure. The platform team maintains the workspaces and modules, which allows application developers to focus on building applications using a repeatable process on standardized infrastructure. + +
+ + ![workflow](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/app_workflow.jpg) +
+ +## Kusion Highlights + +* **Platform as Code** + + Specify desired application intent through declarative configuration code, drive continuous deployment with any CI/CD systems or GitOps to match that intent. No ad-hoc scripts, no hard maintain custom workflows, just declarative configuration code. + +* **Dynamic Configuration Management** + + Enable platform teams to set baseline-templates, control how and where to deploy application workloads and provision accessory resources. While still enabling application developers freedom via workload-centric specification and deployment. + +* **Security & Compliance Built In** + + Enforce security and infrastructure best practices with out-of-box [base models](https://github.com/KusionStack/catalog), create security and compliance guardrails for any Kusion deploy with third-party Policy as Code tools. All accessory resource secrets are automatically injected into Workloads. + +* **Lightweight and Open Model Ecosystem** + + Pure client-side solution ensures good portability and the rich APIs make it easier to integrate and automate. Large growing model ecosystem covers all stages in application lifecycle, with extensive connections to various infrastructure capabilities. + +:::tip + +**Kusion is an early project.** The end goal of Kusion is to boost [Internal Developer Platform](https://internaldeveloperplatform.org/) revolution, and we are iterating on Kusion quickly to strive towards this goal. For any help or feedback, please contact us in [Slack](https://github.com/KusionStack/community/discussions/categories/meeting) or [issues](https://github.com/KusionStack/kusion/issues). diff --git a/docs_versioned_docs/version-v0.12/1-what-is-kusion/2-kusion-vs-x.md b/docs_versioned_docs/version-v0.12/1-what-is-kusion/2-kusion-vs-x.md new file mode 100644 index 00000000..a5ed333d --- /dev/null +++ b/docs_versioned_docs/version-v0.12/1-what-is-kusion/2-kusion-vs-x.md @@ -0,0 +1,37 @@ +--- +id: kusion-vs-x +--- + +# Kusion vs Other Software + +It can be difficult to understand how different software compare to each other. Is one a replacement for the other? Are they complementary? etc. In this section, we compare Kusion to other software. + +**vs. GitOps (ArgoCD, FluxCD, etc.)** + +According to the [open GitOps principles](https://opengitops.dev/), GitOps systems typically have its desired state expressed declaratively, continuously observe actual system state and attempt to apply the desired state. In the design of Kusion toolchain, we refer to those principles but have no intention to reinvent any GitOps systems wheel. + +Kusion adopts your GitOps process and improves it with richness of features. The declarative [AppConfiguration](../concepts/app-configuration) model can be used to express desired intent, once intent is declared [Kusion CLI](../reference/commands) takes the role to make production match intent as safely as possible. + +**vs. PaaS (Heroku, Vercel, etc.)** + +Kusion shares the same goal with traditional PaaS platforms to provide application delivery and management capabilities. The intuitive difference from the full functionality PaaS platforms is that Kusion is a client-side toolchain, not a complete PaaS platform. + +Also traditional PaaS platforms typically constrain the type of applications they can run but there is no such constrain for Kusion which means Kusion provides greater flexibility. + +Kusion allows you to have platform-like features without the constraints of a traditional PaaS. 
However, Kusion is not attempting to replace any PaaS platforms, instead Kusion can be used to deploy to a platform such as Heroku. + +**vs. KubeVela** + +KubeVela is a modern software delivery and management control plane which makes it easier to deploy and operate applications on top of Kubernetes. + +Although some might initially perceive an overlap between Kusion and KubeVela, they are in fact complementary and can be integrated to work together. As a lightweight, purely client-side tool, coupled with corresponding [Generator](https://github.com/KusionStack/kusion-module-framework) implementation, Kusion can render [AppConfiguration](../concepts/app-configuration) schema to generate CRD resources for KubeVela and leverage KubeVela's control plane to implement application delivery. + +**vs. Helm** + +The concept of Helm originates from the [package management](https://en.wikipedia.org/wiki/Package_manager) mechanism of the operating system. It is a package management tool based on templated YAML files and supports the execution and management of resources in the package. + +Kusion is not a package manager. Kusion naturally provides a superset of Helm capabilities with the modeled key-value pairs, so that developers can use Kusion directly as a programable alternative to avoid the pain of writing text templates. For users who have adopted Helm, the stack compilation results in Kusion can be packaged and used in Helm format. + +**vs. Kubernetes** + +Kubernetes(K8s) is a container scheduling and management runtime widely used around the world, an "operating system" core for containers, and a platform for building platforms. Above the Kubernetes API layer, Kusion aims to provide app-centric **abstraction** and unified **workspace**, better **user experience** and automation **workflow**, and helps developers build the app delivery model easily and collaboratively. diff --git a/docs_versioned_docs/version-v0.12/1-what-is-kusion/_category_.json b/docs_versioned_docs/version-v0.12/1-what-is-kusion/_category_.json new file mode 100644 index 00000000..0817eb90 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/1-what-is-kusion/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "What is Kusion?" +} diff --git a/docs_versioned_docs/version-v0.12/2-getting-started/1-install-kusion.md b/docs_versioned_docs/version-v0.12/2-getting-started/1-install-kusion.md new file mode 100644 index 00000000..540881d6 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/2-getting-started/1-install-kusion.md @@ -0,0 +1,144 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Install Kusion + +You can install the latest Kusion CLI on MacOS, Linux and Windows. + +## MacOs/Linux + +For the MacOs and Linux, Homebrew and sh script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on MacOS and Linux is to use the brew package manager. 
+ +**Install Kusion** + +```bash +# tap formula repository Kusionstack/tap +brew tap KusionStack/tap + +# install Kusion +brew install KusionStack/tap/kusion +``` + +**Update Kusion** + +```bash +# update formulae from remote +brew update + +# update Kusion +brew upgrade KusionStack/tap/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/tap/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +curl https://www.kusionstack.io/scripts/install.sh | sh +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, 0.10.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.11.0 +curl https://www.kusionstack.io/scripts/install.sh | sh -s 0.11.0 +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +curl https://www.kusionstack.io/scripts/uninstall.sh | sh +``` + +```mdx-code-block + + +``` + +## Windows + +For the Windows, Scoop and Powershell script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on Windows is to use the scoop package manager. + +**Install Kusion** + +```bash +# add scoop bucket KusionStack +scoop bucket add KusionStack https://github.com/KusionStack/scoop-bucket.git + +# install kusion +scoop install KusionStack/kusion +``` + +**Update Kusion** + +```bash +# update manifest from remote +scoop update + +# update Kusion +scoop install KusionStack/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/install.ps1 | iex" +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.10.0 +powershell {"& { $(irm https://www.kusionstack.io/scripts/install.ps1) } -Version 0.11.0" | iex} +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/uninstall.ps1 | iex" +``` + +```mdx-code-block + + +``` diff --git a/docs_versioned_docs/version-v0.12/2-getting-started/2-deliver-quickstart.md b/docs_versioned_docs/version-v0.12/2-getting-started/2-deliver-quickstart.md new file mode 100644 index 00000000..7b89b4fa --- /dev/null +++ b/docs_versioned_docs/version-v0.12/2-getting-started/2-deliver-quickstart.md @@ -0,0 +1,221 @@ +--- +id: deliver-quickstart +--- + +# Run Your First App on Kubernetes with Kusion + +In this tutorial, we will walk through how to deploy a quickstart application on Kubernetes with Kusion. The demo application can interact with a locally deployed MySQL database, which is declared as an accessory in the config codes and will be automatically created and managed by Kusion. 
+ +## Prerequisites + +Before we start to play with this example, we need to have the Kusion CLI installed and run an accessible Kubernetes cluster. Here are some helpful documents: + +- Install [Kusion CLI](./1-install-kusion.md). +- Run a [Kubernetes](https://kubernetes.io) cluster. Some light and convenient options for Kubernetes local deployment include [k3s](https://docs.k3s.io/quick-start), [k3d](https://k3d.io/v5.4.4/#installation), and [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node). + +## Initialize Project + +We can start by initializing this tutorial project with `kusion init` cmd. + +```shell +# Create a new directory and navigate into it. +mkdir quickstart && cd quickstart + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── default +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the project and stack structure can be found in [Project](../3-concepts/1-project/1-overview.md) and [Stack](../3-concepts/2-stack/1-overview.md). +::: + +### Review Configuration Files + +Now let's have a glance at the configuration codes of `default` stack: + +```shell +cat default/main.k +``` + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares the customized configuration codes for default stack. +quickstart: ac.AppConfiguration { + workload: service.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + } +} +``` + +The configuration file `main.k`, usually written by the **App Developers**, declares the customized configuration codes for `default` stack, including an `AppConfiguration` instance with the name of `quickstart`. The `quickstart` application consists of a `Workload` with the type of `service.Service`, which runs a container named `quickstart` using the image of `kusionstack/kusion-quickstart:latest`. + +Besides, it declares a **Kusion Module** with the type of `network.Network`, exposing `8080` port to be accessed for the long-running service. + +The `AppConfiguration` model can hide the major complexity of Kubernetes resources such as `Namespace`, `Deployment`, and `Service` which will be created and managed by Kusion, providing the concepts that are **application-centric** and **infrastructure-agnostic** for a more developer-friendly experience. + +:::info +More details about the `AppConfiguration` model and built-in Kusion Module can be found in [kam](https://github.com/KusionStack/kam) and [catalog](https://github.com/KusionStack/catalog). +::: + +The declaration of the dependency packages can be found in `default/kcl.mod`: + +```shell +cat default/kcl.mod +``` + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +``` + +:::info +More details about the application model and module dependency declaration can be found in [Kusion Module guide for app dev](../3-concepts/3-module/3-app-dev-guide.md). +::: + +:::tip +The specific module versions we used in the above demonstration is only applicable for Kusion CLI after **v0.12.0**. 
+::: + +## Application Delivery + +Use the following command to deliver the quickstart application in `default` stack on your accessible Kubernetes cluster, while watching the resource creation and automatically port-forwarding the specified port (8080) from local to the Kubernetes Service of the application. We can check the details of the resource preview results before we confirm to apply the diffs. + +```shell +cd default && kusion apply --port-forward 8080 +``` + +![](/img/docs/user_docs/getting-started/kusion_apply_quickstart_0.12.gif) + +:::info +During the first apply, the models and modules that the application depends on will be downloaded, so it may take some time (usually within one minute). You can take a break and have a cup of coffee. +::: + +:::info +Kusion by default will create the Kubernetes resources of the application in the namespace the same as the project name. If you want to customize the namespace, please refer to [Project Namespace Extension](../3-concepts/1-project/2-configuration.md#kubernetesnamespace) and [Stack Namespace Extension](../3-concepts/2-stack/2-configuration.md#kubernetesnamespace). +::: + +Now we can visit [http://localhost:8080](http://localhost:8080) in our browser and play with the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_page.png) + +## Add MySQL Accessory + +As you can see, the demo application page indicates that the MySQL database is not ready yet. Hence, we will now add a MySQL database as an accessory for the workload. + +We can add the Kusion-provided built-in dependency in the `default/kcl.mod`, so that we can use the `MySQL` module in the configuration codes. + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +``` + +We can update the `default/main.k` with the following configuration codes: + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +import mysql + +# main.k declares the customized configuration codes for default stack. +quickstart: ac.AppConfiguration { + workload: service.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + env: { + "DB_HOST": "$(KUSION_DB_HOST_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_USERNAME": "$(KUSION_DB_USERNAME_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_PASSWORD": "$(KUSION_DB_PASSWORD_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + } + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +The configuration codes above declare a local `mysql.MySQL` with the engine version of `8.0` as an accessory for the application workload. The necessary Kubernetes resources for deploying and using the local MySQL database will be generated and users can get the `host`, `username` and `password` of the database through the [MySQL Credentials And Connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) of Kusion in application containers. + +:::info +For more information about the naming convention of Kusion built-in MySQL module, you can refer to [Module Naming Convention](../6-reference/2-modules/3-naming-conventions.md). 
+::: + +After that, we can re-apply the application, and we can set the `--watch=false` to skip watching the resources to be reconciled: + +```shell +kusion apply --port-forward 8080 --watch=false +``` + +![](/img/docs/user_docs/getting-started/kusion_re_apply_quickstart_0.12.gif) + +:::info +You may wait another minute to download the MySQL Module. +::: + +Let's visit [http://localhost:8080](http://localhost:8080) in our browser, and we can find that the application has successfully connected to the MySQL database. The connection information is also printed on the page. + +![](/img/docs/user_docs/getting-started/quickstart_page_with_mysql.png) + +Now please feel free to enjoy the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_mysql_validation.gif) + +## Delete Application + +We can delete the quickstart demo workload and related accessory resources with the following cmd: + +```shell +kusion destroy --yes +``` + +![](/img/docs/user_docs/getting-started/kusion_destroy_quickstart.gif) diff --git a/docs_versioned_docs/version-v0.12/2-getting-started/_category_.json b/docs_versioned_docs/version-v0.12/2-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/2-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/docs_versioned_docs/version-v0.12/3-concepts/0-overview.md b/docs_versioned_docs/version-v0.12/3-concepts/0-overview.md new file mode 100644 index 00000000..44aa634e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/0-overview.md @@ -0,0 +1,21 @@ +--- +id: overview +--- + +# Overview + +In this article, we will provide an overview of the core concepts of Kusion from the perspective of the Kusion workflow. + +![kusion workflow](/img/docs/concept/kusion_workflow.png) + +The workflow of Kusion is illustrated in the diagram above, which consists of three steps. + +The first step is **Write**, where the platform engineers build the [Kusion Modules](./3-module/1-overview.md) and initialize a [Workspace](./4-workspace.md). And the application developers declare their operational intent in [AppConfiguration](./5-appconfiguration.md) under a specific [Project](./1-project/1-overview.md) and [Stack](./2-stack/1-overview.md) path. + +The second step is the **Build** process, which results in the creation of the **SSoT** (Single Source of Truth), also known as the [Spec](./6-spec.md) of the current operational task. If you need version management of the SSoT, we recommend you manage the `Spec` with a VCS (Version Control System) tool like **Git**. + +The third step is **Apply**, which makes the `Spec` effective. Kusion parses the operational intent based on the `Spec` produced in the previous step. Before applying the `Spec`, Kusion will execute the `Preview` command (you can also execute this command manually) which will use a three-way diff algorithm to preview changes and prompt users to make sure all changes meet their expectations. And the `Apply` command will then actualize the operation intent onto various infrastructure platforms, currently supporting **Kubernetes**, **Terraform**, and **On-Prem** infrastructures. A [Release](./9-release.md) file will be created in the [Storage Backend](./7-backend.md) to record an operation. The `Destroy` command will delete the resources recorded in the `Release` file of a project in a specific workspace. + +A more detailed demonstration of the Kusion engine can be seen below. 
+ +![kusion engine](/img/docs/concept/kusion_engine.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/3-concepts/1-project/1-overview.md b/docs_versioned_docs/version-v0.12/3-concepts/1-project/1-overview.md new file mode 100644 index 00000000..edcc84d7 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/1-project/1-overview.md @@ -0,0 +1,12 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview + +A project in Kusion is defined as a folder that contains a `project.yaml` file and is generally recommended to be linked to a Git repository. Typically, the mapping between a project and a repository is 1:1, however, it is possible to have multiple projects connected to a single repository — for example, in the case of a monorepo. A project consists of one or more applications. + +The purpose of the project is to bundle application configurations there are relevant. Specifically, it organizes logical configurations for internal components to orchestrate the application and assembles these configurations to suit different roles, such as developers and SREs, thereby covering the entire lifecycle of application development. + +From the perspective of the application development lifecycle, the configurations delineated by the project is decoupled from the application code. It takes an immutable image as input, allowing users to perform operations and maintain the application within an independent configuration codebase. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/3-concepts/1-project/2-configuration.md b/docs_versioned_docs/version-v0.12/3-concepts/1-project/2-configuration.md new file mode 100644 index 00000000..b5823df8 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/1-project/2-configuration.md @@ -0,0 +1,38 @@ +--- +id: configuration +sidebar_label: Project file reference +--- + +# Kusion project file reference + +Every Kusion project has a project file, `project.yaml`, which specifies metadata about your project, such as the project name and project description. The project file must begin with lowercase `project` and have an extension of either `.yaml` or `.yml`. + +## Attributes + +| Name | Required | Description | Options | +|:------------- |:--------------- |:------------- |:------------- | +| `name` | required | Name of the project containing alphanumeric characters, hyphens, underscores. | None | +| `description` | optional | A brief description of the project. | None | +| `extensions` | optional | List of extensions on the project. | [See blow](#extensions) | + +### Extensions + +Extensions allow you to customize how resources are generated or customized as part of release. + +#### kubernetesNamespace + +The Kubernetes namespace extension allows you to customize namespace within your application generate Kubernetes resources. + +| Key | Required | Description | Example | +|:------|:--------:|:-------------|:---------| +| kind | y | The kind of extension being used. Must be 'kubernetesNamespace' | `kubernetesNamespace` | +| namespace | y | The namespace where all application-scoped resources generate Kubernetes objects. | `default` | + +```yaml +# Example `project.yaml` file with customized namespace of `test`. 
+name: example +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` diff --git a/docs_versioned_docs/version-v0.12/3-concepts/1-project/_category_.json b/docs_versioned_docs/version-v0.12/3-concepts/1-project/_category_.json new file mode 100644 index 00000000..b62ac774 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/1-project/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Projects" +} diff --git a/docs_versioned_docs/version-v0.12/3-concepts/2-stack/1-overview.md b/docs_versioned_docs/version-v0.12/3-concepts/2-stack/1-overview.md new file mode 100644 index 00000000..c6dcd2b5 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/2-stack/1-overview.md @@ -0,0 +1,16 @@ +--- +sidebar_label: Overview +id: overview +--- + +# Overview + +A stack in Kusion is defined as a folder within the project directory that contains a `stack.yaml` file. Stacks provide a mechanism to isolate multiple sets of different configurations in the same project. It is also the smallest unit of operation that can be configured and deployed independently. + +The most common way to leverage stacks is to denote different phases of the software development lifecycle, such as `development`, `staging`, `production`, etc. For instance, in the case where the image and resource requirements for an application workload might be different across different phases in the SDLC, they can be represented by different stacks in the same project, namely `dev`, `stage` and `prod`. + +To distinguish this from the deploy-time concept of a "target environment" - which Kusion defines as `workspaces`, **stack** is a development-time concept for application developers to manage different configurations. One way to illustrate the difference is that you can easily be deploying the `prod` stack to multiple target environments, for example, `aws-prod-us-east`, `aws-prod-us-east-2` and `azure-prod-westus`. + +## High Level Schema + +![High_Level_Schema](/img/docs/user_docs/concepts/high-level-schema.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/3-concepts/2-stack/2-configuration.md b/docs_versioned_docs/version-v0.12/3-concepts/2-stack/2-configuration.md new file mode 100644 index 00000000..b09a5c43 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/2-stack/2-configuration.md @@ -0,0 +1,38 @@ +--- +id: configuration +sidebar_label: Stack file reference +--- + +# Kusion stack file reference + +Every Kusion project's stack has a stack file, `stack.yaml`, which specifies metadata about your stack, such as the stack name and stack description. The stack file must begin with lowercase `stack` and have an extension of either `.yaml` or `.yml`. + +## Attributes + +| Name | Required | Description | Options | +|:------------- |:--------------- |:------------- |:------------- | +| `name` | required | Name of the stack containing alphanumeric characters, hyphens, underscores. | None | +| `description` | optional | A brief description of the stack. | None | +| `extensions` | optional | List of extensions on the stack. | [See blow](#extensions) | + +### Extensions + +Extensions allow you to customize how resources are generated or customized as part of release. + +#### kubernetesNamespace + +The Kubernetes namespace extension allows you to customize namespace within your application generate Kubernetes resources. 
+ +| Key | Required | Description | Example | +|:------|:--------:|:-------------|:---------| +| kind | y | The kind of extension being used. Must be 'kubernetesNamespace' | `kubernetesNamespace` | +| namespace | y | The namespace where all application-scoped resources generate Kubernetes objects. | `default` | + +```yaml +# Example `stack.yaml` file with customized namespace of `test`. +name: dev +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` diff --git a/docs_versioned_docs/version-v0.12/3-concepts/2-stack/_category_.json b/docs_versioned_docs/version-v0.12/3-concepts/2-stack/_category_.json new file mode 100644 index 00000000..914c863f --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/2-stack/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Stacks" +} diff --git a/docs_versioned_docs/version-v0.12/3-concepts/3-module/1-overview.md b/docs_versioned_docs/version-v0.12/3-concepts/3-module/1-overview.md new file mode 100644 index 00000000..b6487117 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/3-module/1-overview.md @@ -0,0 +1,16 @@ +# Overview + +A Kusion module is a reusable building block designed by platform engineers to standardize application deployments and enable app developers to self-service. It consists of two parts: + +- App developer-oriented schema: It is a [KCL schema](https://kcl-lang.io/docs/user_docs/guides/schema-definition/). Fields in this schema are recommended to be understandable to application developers and workspace-agnostic. For example, a database Kusion module schema only contains fields like database engine type and database version. +- Kusion module generator: It is a piece of logic that generates the Intent with an instantiated schema mentioned above, along with platform configurations ([workspace](../workspace)). As a building block, Kusion module hides the complexity of infrastructures. A database Kusion module not only represents a cloud RDS, but it also contains logic to configure other resources such as security groups and IAM policies. Additionally, it seamlessly injects the database host address, username, and password into the workload's environment variables. The generator logic can be very complex in some situations so we recommend implementing it in a GPL like [go](https://go.dev/). + +Here are some explanations of the Kusion Module: + +1. It represents an independent unit that provides a specific capability to the application with clear business semantics. +2. It consists of one or multiple infrastructure resources (K8s/Terraform resources), but it is not merely a collection of unrelated resources. For instance, a database, monitoring capabilities, and network access are typical Kusion Modules. +3. Modules should not have dependencies or be nested within each other. +4. AppConfig is not a Module. +5. Kusion Module is a superset of [KPM](https://www.kcl-lang.io/docs/user_docs/guides/package-management/quick-start). It leverages the KPM to manage KCL schemas in the Kusion module. 
+ +![kusion-module](/img/docs/concept/kusion-module.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/3-concepts/3-module/2-develop-guide.md b/docs_versioned_docs/version-v0.12/3-concepts/3-module/2-develop-guide.md new file mode 100644 index 00000000..e4a076e9 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/3-module/2-develop-guide.md @@ -0,0 +1,256 @@ +# Platform Engineer Develop Guide + +## Prerequisites + +To follow this guide, you will need: + +- Go 1.22 or higher installed and configured +- Kusion v0.12 or higher installed locally + +## Workflow + +As a platform engineer, the workflow of developing a Kusion module looks like this: + +1. Communicate with app developers and identify the fields that should exposed to them in the dev-orient schema +2. Identify module input parameters that should be configured by platform engineers in the [workspace](../workspace) +3. Define the app dev-orient schema +4. Develop the module by implementing gRPC interfaces + +The first two steps primarily involve communication with the application development team, and the specific details are not included in this tutorial. This tutorial begins with the subsequent two steps. + +## Set up a developing environment + +Developing a Kusion module includes defining a KCL schema and developing a module binary in golang. We will provide a scaffold repository and a new command `kusion mod init` to help developers set up the developing environment easily. + +After executing the command + +```shell +kusion mod init +``` + +Kusion will download a [scaffold repository](https://github.com/KusionStack/kusion-module-scaffolding) and rename this project with your module name. The scaffold contains code templates and all files needed for developing a Kusion module. + +## Developing + +The scaffold repository directory structure is shown below: + +```shell +$ tree kawesome/ +. +├── example +│   ├── dev +│   │   ├── example_workspace.yaml +│   │   ├── kcl.mod +│   │   ├── main.k +│   │   └── stack.yaml +│   └── project.yaml +├── kawesome.k +├── kcl.mod +└── src + ├── Makefile + ├── go.mod + ├── go.sum + ├── kawesome_generator.go + └── kawesome_generator_test.go +``` + +When developing a Kusion module with the scaffold repository, you could follow the steps below: + +1. **Define the module name and version** + - For go files. Rename the module name in the `go.mod` and related files to your Kusion module name. + ```yaml + module kawsome + go 1.22 + require ( + ... + ) + ``` + - For KCL files. Rename package name and version in the `kcl.mod` + ```toml + [package] + name = "kawesome" + version = 0.2.0 + ``` + + We assume the module named is `kawesome` and the version is `0.2.0` in this guide. + +2. **Define the dev-orient schemas**. They would be initialized by app developers. In this scaffold repository, we've defined a schema named Kawesome in `kawesome.k` that consists of two resources `Service` and `RandomPassword` and they will be generated into a Kubernetes Service and a Terraform RandomPassword later. + +```python +schema Kawesome: +""" Kawesome is a sample module schema consisting of Service +and RandomPassword + +Attributes +---------- +service: Service, default is Undefined, required. + The exposed port of Workload, which will be generated into Kubernetes Service. +randomPassword: RandomPassword, default is Undefined, required. + The sensitive random string, which will be generated into Terraform random_password. + +Examples +-------- +import kawesome as ks + +... ... 
+
+    accessories: {
+        "kawesome": kawesome.Kawesome {
+            service: kawesome.Service{
+                port: 5678
+            }
+            randomPassword: kawesome.RandomPassword {
+                length: 20
+            }
+        }
+    }
+    """
+
+    # The exposed port of Workload, which will be generated into Kubernetes Service.
+    service: Service
+
+    # The sensitive random string, which will be generated into Terraform random_password.
+    randomPassword: RandomPassword
+```
+
+3. **Implement the [gRPC proto](https://github.com/KusionStack/kusion/blob/main/pkg/modules/proto/module.proto) generate interface.** The `generate` interface consumes the application developer's config described in the [`AppConfiguration`](../app-configuration) and the platform engineer's config described in the [`workspace`](../workspace) to generate all infrastructure resources represented by this module.
+
+```go
+func (k *Kawesome) Generate(_ context.Context, request *module.GeneratorRequest) (*module.GeneratorResponse, error) {
+    // generate your infrastructure resources
+}
+
+// Patcher primarily contains patches for fields associated with Workloads, and additionally offers the capability to patch other resources.
+type Patcher struct {
+    // Environments represent the environment variables patched to all containers in the workload.
+    Environments []v1.EnvVar `json:"environments,omitempty" yaml:"environments,omitempty"`
+    // Labels represent the labels patched to the workload.
+    Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+    // PodLabels represent the labels patched to the pods.
+    PodLabels map[string]string `json:"podLabels,omitempty" yaml:"podLabels,omitempty"`
+    // Annotations represent the annotations patched to the workload.
+    Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
+    // PodAnnotations represent the annotations patched to the pods.
+    PodAnnotations map[string]string `json:"podAnnotations,omitempty" yaml:"podAnnotations,omitempty"`
+    // JSONPatchers represents patchers that can be patched to an arbitrary resource.
+    // The key of this map represents the ResourceId of the resource to be patched.
+    JSONPatchers map[string]JSONPatcher `json:"jsonPatcher,omitempty" yaml:"jsonPatcher,omitempty"`
+}
+```
+
+The `GeneratorRequest` contains the application developer's config, the platform engineer's config, the workload config and the related metadata a module may need to generate infrastructure resources.
+In the `GeneratorResponse`, there are two fields, `Resources` and `Patchers`. The `Resources` represent resources that should be operated by Kusion; they will be appended into the [Spec](../spec). The `Patchers` are used to patch the workload and other resources.
+
+### Workload
+
+The Workload in the AppConfiguration is also a Kusion module. If the workload module only generates one resource, this resource will be regarded as the workload resource. However, if the workload module generates more than one resource, exactly one of them must contain a key-value pair in the 'extension' field, where the key is 'kusion.io/is-workload' and the value is 'true'; this resource will be regarded as the workload resource.
+
+### Implicit Resource Dependency
+
+When you need to use an attribute of another resource as the value of a specific resource attribute, Kusion supports declaring the implicit resource dependencies with the `$kusion_path` prefix.
You can concatenate the implicit resource dependency path with the resource `id`, attribute `name` and the `$kusion_path` prefix, for example: + +```yaml +# Dependency path as an attribute value. +spec: + resources: + - id: v1:Service:test-ns:test-service + type: Kubernetes + attributes: + metadata: + annotations: + deployment-name: $kusion_path.v1:Deployment:test-ns:test-deployment.metadata.name +``` + +In addition, please note that: + +- The implicit resource dependency path can only be used to replace the value in `Attributes` field of the `Resource`, but not the key. For example, the following `Spec` is **invalid**: + +```yaml +# Dependency path not in `attributes`. +spec: + resources: + - id: v1:Service:test:$kusion_path.apps/v1:Deployment:test-ns:test-deployment.metadata.name +``` + +```yaml +# Dependency path in the key, but not in the value. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + $kusion_path.v1:Service:test-ns:test-service.metadata.name: test-svc +``` + +- The implicit resource dependency path can only be used as a standalone value and cannot be combined with other string. For example, the following `Spec` is **invalid**: + +```yaml +# Dependency path combined with other string. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + test-svc: $kusion_path.v1:Service:test-ns:test-service.metadata.name + "-test" +``` + +- The impliciy resource dependency path does not support accessing the value in an array, so the following is currently **invalid**: + +```yaml +# Dependency path accessing the value in an array. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + test-svc: $kusion_path.v1:Service:test-ns:test-service.spec.ports[0].name +``` + +## Publish + +Publish the Kusion module to an OCI registry with the command `kusion mod push`. If your module is open to the public, we **welcome and highly encourage** you to contribute it to the module registry [catalog](https://github.com/KusionStack/catalog), so that more people can benefit from the module. Submit a pull request to this repository, once it is merged, it will be published to the [KusionStack GitHub container registry](https://github.com/orgs/KusionStack/packages). + +Publish a stable version +```shell +kusion mod push /path/to/my-module oci:/// --creds +``` + +Publish a module of a specific OS arch +```shell +kusion mod push /path/to/my-module oci:/// --os-arch==darwin/arm64 --creds +``` + +Publish a pre-release version +```shell +kusion mod push /path/to/my-module oci:/// --latest=false --creds +``` + +:::info +The OCI URL format is `oci:///` and please ensure that your token has permissions to write to the registry. +::: + +More details can be found in the `kusion mod push` reference doc. + +## Register to the workspace + +```yaml +modules: + kawesome: + path: oci://ghcr.io/kusionstack/kawesome + version: 0.2.0 + configs: + default: + service: + labels: + kusionstack.io/module-name: kawesome + annotations: + kusionstack.io/module-version: 0.2.0 +``` + +Register module platform configuration in the `workspace.yaml` to standardize the module's behavior. App developers can list all available modules registered in the workspace. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/3-concepts/3-module/3-app-dev-guide.md b/docs_versioned_docs/version-v0.12/3-concepts/3-module/3-app-dev-guide.md new file mode 100644 index 00000000..3169c67c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/3-module/3-app-dev-guide.md @@ -0,0 +1,127 @@ +# Application Developer User Guide + +## Prerequisites + +To follow this guide, you will need: + +- Kusion v0.12 or higher installed locally + +## Workflow + +As an application developer, the workflow of using a Kusion module looks like this: + +1. Browse available modules registered by platform engineers in the workspace +2. Add modules you need to your Stack +3. Initialize modules +4. Apply the AppConfiguration + +## Browse available modules + +For all KusionStack built-in modules, you can find all available modules and documents in the [reference](../../6-reference/2-modules/index.md) + +Since the platform engineers have already registered the available modules in the workspace, app developers can execute `kusion mod list` to list the available modules. + +```shell +kusion mod list --workspace dev + +Name Version URL +kawesome 0.2.0 oci://ghcr.io/kusionstack/kawesome +``` + +## Add modules to your Stack + +Taking `kawesome` as an example, the directory structure is shown below: + +```shell +example +├── dev +│   ├── example_workspace.yaml +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +Select the module you need from the result of `kusion mod list` and execute `kusion mod add kawesome` to add `kawesome` into your Stack. + +Once you have added the `kawesome` module, the `kcl.mod` file will be updated to look like this. + +``` toml +[package] +name = "example" + +[dependencies] +kawesome = { oci = "oci://ghcr.io/kusionstack/kawesome", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +- The `kam` dependency represents the [Kusion Application Module](https://github.com/KusionStack/kam.git) which contains the AppConfiguration. +- The `service` dependency represents the service workload module. +- The `kawesome` is the Kusion module we are going to use in the AppConfiguration. + +## Initialize modules + +```python +# The configuration codes in perspective of developers. +import kam.v1.app_configuration as ac +import service +import service.container as c +import kawesome.v1.kawesome + +kawesome: ac.AppConfiguration { + # Declare the workload configurations. + workload: service.Service { + containers: { + kawesome: c.Container { + image: "hashicorp/http-echo" + env: { + "ECHO_TEXT": "$(KUSION_KAWESOME_RANDOM_PASSWORD)" + } + } + } + replicas: 1 + } + # Declare the kawesome module configurations. + accessories: { + "kawesome": kawesome.Kawesome { + service: kawesome.Service{ + port: 5678 + } + randomPassword: kawesome.RandomPassword { + length: 20 + } + } + } +} +``` + +Initialize the `kawesome` module in the `accessories` block of the AppConfiguration. The key of the `accessories` item represents the module name and the value represents the actual module you required. + +## Apply the result + +Execute the preview command to validate the result. + +```shell +kusion apply + ✔︎ Generating Spec in the Stack dev... 
+
+Stack: dev
+ID                                                      Action
+hashicorp:random:random_password:example-dev-kawesome   Create
+v1:Namespace:example                                    Create
+v1:Service:example:example-dev-kawesome                 Create
+apps/v1:Deployment:example:example-dev-kawesome         Create
+
+
+Do you want to apply these diffs?:
+  > details
+Which diff detail do you want to see?:
+> all
+  hashicorp:random:random_password:example-dev-kawesome Create
+  v1:Namespace:example Create
+  v1:Service:example:example-dev-kawesome Create
+  apps/v1:Deployment:example:example-dev-kawesome Create
+```
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.12/3-concepts/3-module/_category_.json b/docs_versioned_docs/version-v0.12/3-concepts/3-module/_category_.json
new file mode 100644
index 00000000..5952a21e
--- /dev/null
+++ b/docs_versioned_docs/version-v0.12/3-concepts/3-module/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Modules"
+}
diff --git a/docs_versioned_docs/version-v0.12/3-concepts/4-workspace.md b/docs_versioned_docs/version-v0.12/3-concepts/4-workspace.md
new file mode 100644
index 00000000..daedd84f
--- /dev/null
+++ b/docs_versioned_docs/version-v0.12/3-concepts/4-workspace.md
@@ -0,0 +1,222 @@
+---
+id: workspace
+sidebar_label: Workspace
+---
+
+# Workspace
+
+Workspace is a logical concept that maps to an actual target environment to deploy a stack to. In today's context, this _usually_ represents a Kubernetes cluster for the application workload and an optional cloud account to provision infrastructure resources that the workload depends on (a database, for example). Aside from the deployment destination, workspaces are also designed to be associated with a series of platform configurations that are used by all the stacks deployed to said workspace.
+
+When executing the command `kusion generate`, Kusion will "match" the AppConfiguration and the appropriate workspace configuration to dynamically generate the `Spec`, which contains the complete manifest to describe the resources in the stack. The relationship of the Project, Stack and Workspace is shown below. Notice that all three ways to organize stacks are valid.
+
+![project-stack-workspace](/img/docs/concept/project-stack-workspace.png)
+
+Workspace is designed to address separation of concerns. As opposed to the development-time concept of a "stack", a workspace is a deploy-time concept that represents a deployment target, a.k.a. an actual runtime environment. Workspaces should be entirely managed by Platform Engineers to alleviate the burden on developers to understand environment-specific details.
+
+To dig a little deeper, a workspace represents the need for a distinct set of "platform opinions". That includes things that application developers either don't want to or shouldn't need to worry about, such as which Kubernetes cluster to deploy to, the credentials to deploy to said clusters, and other infrastructure details like what database instance to provision.
+
+Workspace is intended to be flexible so you can map workspaces as you see fit to the boundaries that are relevant to your use case. For example, you can map a workspace to a cloud region (aws-us-east-1), provided that regional isolation is sufficient for you (this is an extreme case). Alternatively, a workspace can be mapped to the combination of a cloud region and an SDLC phase (aws-dev-us-east-1), provided that it represents the right boundary from a platform perspective.
+
+The workspace configuration is in a deterministic format and currently written in YAML.
The subcommands of `kusion workspace` are provided to manage the workspaces. When using `kusion workspace`, the workspace configuration will be saved as YAML file in local file system. To avoid the possible risks, the environment variables are provided to hold the sensitive data such as Access Keys and Secret keys. + +## Workspace Configuration + +The configuration of a Workspace is stored in a single YAML file, which consists of `modules`, `secretStore`, and `context`. An example of Workspace configuration is shown as below. + +```yaml +# The platform configuration for Modules or KAMs. +# For each Module or KAM, the configuration format is as below. +# # ${module_identifier} or ${KAM_name}: +# # path: oci://ghcr.io/kusionstack/module-name # url of the module artifact +# # version: 0.2.0 # version of the module +# # configs: +# # default: # default configuration, applied to all projects +# # ${field1}: ${value1} +# # #{field2}: ${value2} +# # ... +# # ${patcher_name}: #patcher configuration, applied to the projects assigned in projectSelector +# # ${field1}: ${value1_override} +# # ... +# # projectSelector: +# # - ${project1_name} +# # - ${project2_name} +# # ... +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: ${mysql_subnet_id} + databaseName: kusion + largeSize: + size: 50 + projectSelector: + - foo + - bar + importDBInstance: + importedResources: + "aliyun:alicloud:alicloud_db_instance:wordpress-demo": "your-imported-resource-id" + projectSelector: + - baz + +secretStore: + provider: + aws: + region: us-east-1 + profile: The optional profile to be used to interact with AWS Secrets Manager. + +context: + KUBECONFIG_PATH: $HOME/.kube/config + AWS_ACCESS_KEY_ID: ref://secrets-manager-name/key-for-ak + AWS_SECRET_ACCESS_KEY: ref://secrets-manager-name/key-for-sk +``` + +### modules + +The `modules` are the platform-part configurations of Modules and KAMs, where the identifier of them are `${namespace}/${module_name}@${module_tag}` and `${kam_name}`. For each Module or KAM configuration, it is composed of a `default` and several `patcher` blocks. The `default` block contains the universal configuration of the Workspace, and can be applied to all Stacks in the Workspace, which is composed of the values of the Module's or KAM's fields. The `patcher` block contains the exclusive configuration for certain Stacks, which includes not only the fields' values, but also the applied Projects. + +The `patcher` block is designed to increase the flexibility for platform engineers managing Workspaces. Cause the Workspace should map to the real physical environment, in the actual production practice, it's almost impossible that all the Stacks share the same platform configuration, although we want them the same. + +The values of the same fields in `patcher` will override the `default`, and one field in multiple patchers is forbidden to assign to the same Project. That is, if there are more than one `patcher` declaring the same field with different values, the applied Projects are prohibited to overlap. And, The name of `patcher` must not be `default`. + +In the `patcher`, the applied Projects are assigned by the field `ProjectSelector`, which is an array of the Project names. The `ProjectSelector` is provided rather than something may like `StackSelector`, which specifies the applied Stacks. Here are the reasons. 
From the perspective of using a Workspace, the mapping between Workspace and Stack is specified by the users of Kusion's operation commands. From the perspective of the relationship among Project, Stack and Workspace, the Workspace is designed for the reuse of platform-level configuration among multiple Projects. When a Project "encounters" a Workspace, it becomes a "Stack instance", which can be applied to a series of real resources. If something like `StackSelector` were used, this reuse could not be realized, and the Workspace would lose its relevance. For more information about the relationship, please refer to [Project](project/overview) and [Stack](stack/overview).
+
+Different Modules and KAMs have different names, fields, and corresponding formats and restrictions. When writing the configuration, check the corresponding Module's or KAM's description, and make sure all the requisite Modules and KAMs are correctly configured. Please refer to [Kusion Module](module/overview) for more information. The example above gives a sample configuration of the `mysql` Module.
+
+The `importedResources` block is designed to declare the import of existing cloud resources. It is a `map` where you can declare the mapping from the `id` of the resource in the Kusion `Spec` to the Terraform ID of the resource to be imported. Kusion will automatically synchronize the state of the existing cloud resource for the Kusion resource.
+
+### secretStore
+
+The `secretStore` field can be used to access the sensitive data stored in a cloud-based secrets manager. More details can be found [here](../5-user-guides/4-secrets-management/1-using-cloud-secrets.md).
+
+### context
+
+The `context` field can be used to declare information such as the Kubernetes `kubeconfig` path or content, and the AK/SK of the Terraform providers. The configurable attributes are shown below.
+
+- `KUBECONFIG_PATH`: the local path of the `kubeConfig` file
+- `KUBECONFIG_CONTENT`: the content of the `kubeConfig` file, can be used with cloud-based secrets management (e.g. `ref://secrets-management-name/secrets-key-for-kubeconfig`)
+- `AWS_ACCESS_KEY_ID`: the access key ID of the AWS provider
+- `AWS_SECRET_ACCESS_KEY`: the secret key of the AWS provider
+- `ALICLOUD_ACCESS_KEY`: the access key ID of the Alicloud provider
+- `ALICLOUD_SECRET_KEY`: the secret key of the Alicloud provider
+
+## Managing Workspace
+
+The subcommands of `kusion workspace` are used to manage Workspaces, including `create`, `show`, `list`, `switch`, `update` and `delete`. Because the Workspace configurations are stored persistently, the current or a specified Backend will be used. For more information about Backends, please refer to [Backend](backend).
+
+Kusion automatically creates a `default` Workspace with an empty configuration in every Backend and sets it as the current one. When first using Kusion, or when no Workspace has been configured, the `default` Workspace will be used.
+
+### Creating Workspace
+
+Use `kusion workspace create ${name} -f ${configuration_file_path}` to create a new Workspace with the configuration in a YAML file. The Workspace is identified by its `name`, which must be new, and the configuration must be written in a correctly formatted YAML file.
+
+The command above creates the Workspace in the current Backend. To create a Workspace in another Backend, use the `--backend` flag. Workspace names within a Backend must be unique, but the same name may be used in different Backends.
+
+In some scenarios, when a Workspace is created, it is expected to become the current one. For convenience, the `--current` flag is provided to set the Workspace as current alongside its creation.
+
+Note that creating a `default` Workspace is not allowed, because it is created by Kusion automatically.
+
+An example is shown below.
+
+```shell
+# create a workspace in the current backend
+kusion workspace create dev -f dev.yaml
+
+# create a workspace in the current backend and set it as current
+kusion workspace create dev -f dev.yaml --current
+
+# create a workspace in a specified backend
+kusion workspace create dev -f dev.yaml --backend oss-pre
+```
+
+Which Workspaces to create is decided by the platform engineers. We recommend organizing them by the following rules:
+
+- **SDLC phases**, such as `dev`, `pre`, `prod`;
+- **cloud vendors**, such as `aws`, `alicloud`;
+- a combination of the two above, such as `dev-aws`, `prod-alicloud`.
+
+By design, Kusion does not support deploying a Stack to multiple clouds or regions within a single Workspace. While users can technically define a Module that provisions resources across multiple clouds or regions, Kusion does not recommend this practice and will not provide technical support for such configurations. If platform engineers need to manage resources across multiple clouds or regions, they should create separate Workspaces.
+
+### Listing Workspace
+
+Use `kusion workspace list` to get all the workspace names.
+
+An example is shown below. For simplicity, the following examples do not show specifying a Backend, which is supported by the `--backend` flag.
+
+```shell
+# list all the workspace names
+kusion workspace list
+```
+
+### Switching Workspace
+
+To avoid specifying the Workspace name for each Kusion operation command, `kusion workspace switch ${name}` is provided to switch the current Workspace. Then, when executing `kusion generate`, the current Workspace will be used. The Workspace to switch to must already have been created.
+
+An example is shown below.
+
+```shell
+# switch workspace
+kusion workspace switch dev
+```
+
+### Showing Workspace
+
+Use `kusion workspace show ${name}` to get the Workspace configuration. If the `name` is not specified, the configuration of the current Workspace is returned.
+
+An example is shown below.
+
+```shell
+# show a specified workspace configuration
+kusion workspace show dev
+
+# show the current workspace configuration
+kusion workspace show
+```
+
+### Updating Workspace
+
+When a Workspace needs to be updated, use `kusion workspace update ${name} -f ${configuration_file_path}` with the new configuration file. The whole updated configuration must be provided, and the Workspace must already exist. The recommended steps are to get the Workspace configuration first, then revise it and execute the command. If the `name` is not specified, the current Workspace will be used.
+
+Updating the `default` Workspace is allowed, and the `--current` flag is also supported to set it as the current one.
+
+An example is shown below.
+
+```shell
+# update a specified workspace
+kusion workspace update dev -f dev_new.yaml
+
+# update a specified workspace and set it as current
+kusion workspace update dev -f dev_new.yaml --current
+
+# update the current workspace
+kusion workspace update -f dev_new.yaml
+```
+
+### Deleting Workspace
+
+When a Workspace is not in use anymore, use `kusion workspace delete ${name}` to delete it.
If the `name` is not specified, the current Workspace will get deleted, and the `default` Workspace will be set as the current Workspace. Therefore, deleting the `default` Workspace is not allowed. + +The example is shown as below. + +```shell +# delete a specified workspace +kusion workspace delete dev + +# delete the current workspace +kusion workspace delete +``` + +## Using Workspace + +Workspace is used in the command `kusion generate`, the following steps help smooth the operation process. + +1. Write the Workspace configuration file with the format shown above, and fulfill all the necessary fields; +2. Create the workspace with `kusion workspace create`, then Kusion perceives the Workspace. The flag `--current` can be used to set it as the current. +3. Execute `kusion generate` in a Stack to generate the whole Spec, the AppConfiguration and Workspace configuration get rendered automatically, and can be applied to the real infrastructure. If the appointed Workspace or Backend is asked, the flags `--workspace` and `--backend` will help achieve that. +4. If the Workspace needs to update, delete, switch, etc. Use the above commands to achieve that. diff --git a/docs_versioned_docs/version-v0.12/3-concepts/5-appconfiguration.md b/docs_versioned_docs/version-v0.12/3-concepts/5-appconfiguration.md new file mode 100644 index 00000000..570d1ac0 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/5-appconfiguration.md @@ -0,0 +1,38 @@ +--- +id: app-configuration +sidebar_label: AppConfiguration +--- + +# AppConfiguration + +As a modern cloud-native application delivery toolchain, declarative intent-based actuation is the central idea of Kusion, and `AppConfiguration` model plays the role of describing the intent, which provides a simpler path for on-boarding developers to the platform without leaking low-level details in runtime infrastructure and allows developers to fully focus on the application logic itself. + +The `AppConfiguration` model consolidates workload and their dependent accessories for the application deployment, along with any pipeline and operational requirements into one standardized, infrastructure-independent declarative specification. This declarative specification represents the intuitive user intent for the application, which drives a standardized and efficient application delivery and operation process in a hybrid environment. + +![appconfig.png](/img/docs/concept/appconfig.png) + +AppConfiguration consists of four core concepts, namely `Workload`, `Accessory`, `Pipeline`, and `Dependency`. Each of them represents a [Kusion module](./3-module/1-overview.md). We will walk through these concepts one by one. + +#### Workload + +Workload is a representation of the business logic that runs in the cluster. Common workload types include long-running services that should “never” go down and batch jobs that take from a few seconds to a few days to complete. + +In most cases, a Workload is a backend service or the frontend of an Application. For example, in a micro-service architecture, each service would be represented by a distinct Workload. This allows developers to manage and deploy their code in a more organized and efficient manner. + +#### Accessory + +Using the analogy of a car, workload is the core engine of the application, but only having the engine isn’t enough for the application to function properly. In most cases, there must be other supporting parts for the workload to operate as intended. For those supporting parts, we call them Accessory. 
Accessory refers to various runtime capabilities and operational requirements provided by the underlying infrastructure, such as database, network load-balancer, storage and so on. + +From the perspective of team collaboration, the platform team should be responsible for creating and maintaining various accessory definitions, providing reusable building blocks out-of-the-box. Application developers just need to leverage the existing accessories to cover the evolving application needs. This helps software organizations achieve separation of concern so that different roles can focus on the subject matter they are an expert in. + +#### Pipeline + +Running reliable applications requires reliable delivery pipelines. By default, Kusion provides a relatively fixed built-in application delivery pipeline, which should be sufficient for most use cases. However, as the application scale and complexity grow, so does the need for a customizable delivery pipeline. Developers wish for more fine-tuned control and customization over the workflow to deliver their applications. That’s why we introduced the Pipeline section in AppConfiguration model. + +A customized delivery pipeline is made of several steps, each corresponding to an operation that needs to be executed, such as running certain tests after a deployment, scanning artifacts for vulnerabilities prior to deployment, and so on. Implementation-wise, the execution of each step should be carried out in the form of a plugin, developed and managed by the platform owners. + +#### Topologies + +Application dependencies refer to the external services or other software that an application relies on to function properly. These dependencies may be required to provide certain functionality or to use certain features in the application. + +Similar to declaring a dependency from an application to an accessory, AppConfiguration lets you declare the dependencies between different applications in the same way. diff --git a/docs_versioned_docs/version-v0.12/3-concepts/6-spec.md b/docs_versioned_docs/version-v0.12/3-concepts/6-spec.md new file mode 100644 index 00000000..b88ba030 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/6-spec.md @@ -0,0 +1,121 @@ +--- +id: spec +sidebar_label: Spec +--- + +# Spec + +The Spec represents the operational intentions that you aim to deliver using Kusion. These intentions are expected to contain all components throughout the DevOps lifecycle, including resources (workload, database, load balancer, etc.), dependencies, and policies. The Kusion module generators are responsible for converting all AppConfigurations and environment configurations into the Spec. Once the Spec is generated, the Kusion Engine takes charge of updating the actual infrastructures to match the Spec. + +## Purpose + +### Single Source of Truth + +In Kusion's workflow, the platform engineer builds Kusion modules and provides environment configurations, application developers choose Kusion modules they need and deploy operational intentions to an environment with related environment configurations. They can also input dynamic parameters like the container image when executing the `kusion generate` command. So the final operational intentions include configurations written by application developers, environment configurations and dynamic inputs. Due to this reason, we introduce **Spec** to represent the SSoT(Single Source of Truth) of Kusion. It is the result of `kusion generate` which contains all operational intentions from different sources. 
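+
+As a quick illustration (the workspace name below is only an example), the Spec is produced by running `kusion generate` from within a stack directory; a flag such as `--workspace` can be used to select the target workspace explicitly:
+
+```shell
+# Render the Spec from the AppConfiguration, the matched workspace
+# configurations and any dynamic inputs of the current stack.
+kusion generate
+
+# Target a specific workspace (the name "dev" is illustrative).
+kusion generate --workspace dev
+```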
+ +### Consistency + +Delivering an application to different environments with identical configurations is a common practice, especially for applications that require scalable distribution. In such cases, an immutable configuration package is helpful. By utilizing the Spec, all configurations and changes are stored in a single file. As the Spec is the input of Kusion, it ensures consistency across different environments whenever you execute Kusion with the same Spec file. + +### Rollback and Disaster Recovery + +The ability to roll back is crucial in reducing incident duration. Rolling back the system to a previously validated version is much faster compared to attempting to fix it during an outage. We regard a validated Spec as a snapshot of the system and recommend storing the Spec in a version control system like Git. This enables better change management practices and makes it simpler to roll back to previous versions if needed. In case of a failure or outage, having a validated Spec simplifies the rollback process, ensuring that the system can be quickly recovered. + +## Example + +The API definition of the `Spec` structure in Kusion can be found [here](https://github.com/KusionStack/kusion/blob/main/pkg/apis/api.kusion.io/v1/types.go#L862). Below is an example `Spec` file generated from the `quickstart` demo application (more details can be found [here](../2-getting-started/2-deliver-quickstart.md)). + +```yaml +resources: + - id: v1:Namespace:quickstart + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: quickstart + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:quickstart:quickstart-default-quickstart + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + name: quickstart-default-quickstart + namespace: quickstart + spec: + selector: + matchLabels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + spec: + containers: + - image: kusionstack/kusion-quickstart:latest + name: quickstart + resources: {} + status: {} + dependsOn: + - v1:Namespace:quickstart + - v1:Service:quickstart:quickstart-default-quickstart-private + extensions: + GVK: apps/v1, Kind=Deployment + - id: v1:Service:quickstart:quickstart-default-quickstart-private + type: Kubernetes + attributes: + apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + name: quickstart-default-quickstart-private + namespace: quickstart + spec: + ports: + - name: quickstart-default-quickstart-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + type: ClusterIP + status: + loadBalancer: {} + dependsOn: + - v1:Namespace:quickstart + extensions: + GVK: /v1, Kind=Service +secretStore: null +context: {} +``` + +From the example above, we can see that the `Spec` contains a list of `resources` required by the application. Each resource in the `Spec` needs to have `id`, `type`, `attributes`, `dependsOn`, and `extensions` fields: + +- `id` is the unique key of this resource. 
An idiomatic way for Kubernetes resources is `apiVersion:kind:namespace:name`, and for Terraform resources is `providerNamespace:providerName:resourceType:resourceName`. +- `type` represents the type of runtime Kusion supports, and currently includes `Kubernetes` and `Terraform`. +- `attributes` represents all specified attributes of this resource, basically the manifest and argument attributes for the `Kubernetes` and `Terraform` resources. +- `dependsOn` contains all the other resources the resource depends on. +- `extensions` specifies the arbitrary metadata of the resource, where you can declare information such as Kubernetes GVK, Terraform provider, and imported resource id, etc. + +Besides the `resources`, Spec also records the `secretStore` and `context` field in the corresponding workspace. The former can be used to access sensitive data stored in an external secrets manager, while the latter can be used to declare the workspace-level configurations such as Kubernetes `kubeconfig` file path or content, and Terraform providers' AK/SK. More information can be found [here](4-workspace.md#secretstore). + +## Apply with Spec File + +Kusion supports using the Spec file directly as input. Users can place the Spec file in the stack directory and execute `kusion preview --spec-file spec.yaml` and `kusion apply --spec-file spec.yaml` to preview and apply the resources. diff --git a/docs_versioned_docs/version-v0.12/3-concepts/7-backend.md b/docs_versioned_docs/version-v0.12/3-concepts/7-backend.md new file mode 100644 index 00000000..5262f7ac --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/7-backend.md @@ -0,0 +1,228 @@ +--- +id: backend +sidebar_label: Backends +--- + +# Backend + +Backend is Kusion's storage, which defines the place to store Workspace and Release. By default, Kusion uses the `local` type of backend to store on the local disk. While in the scenario of team collaboration, the Workspace and Release can be stored on a remote backend, such as `oss` and `s3`, to allow multiple users' access. + +The command `kusion config` is used to configure the backend configuration. Configuring a whole backend or an individual config item are both supported. For the sensitive data, the environment variables are supported, and with higher priority. + +Furthermore, Kusion provides the operation of setting current backend. Thus, the trouble of specifying backend can be saved when executing operation commands and managing `workspace`. + +## Available Backend Types + +There are three available backend types: `local`, `oss`, `s3`. + +### local + +The `local` type backend uses local file system as storage, which is suitable for local operations, but not ideal for multi-user collaboration. The supported config items are as below. + +- **path**: `type string`, `optional`, specify the directory to store the Workspace and Release files. The subdirectories `workspaces` and `releases` are used to store the corresponding files separately. It's recommended to use an empty or a Kusion exclusive directory as the local backend path. If not set, the default path `${KUSION_HOME}` is in use. + +The whole local type backend configuration is as below. + +```yaml +{ + "type": "local", + "configs": { + "path": "${local_path}" # type string, optional, the directory to store files. + } +} +``` + +### oss + +The `oss` type backend uses the Alicloud Object Storage Service (OSS) as storage. The supported config items are as below. 
+
+- **endpoint**: `type string`, `required`, specifies the access endpoint of the alicloud oss bucket.
+- **accessKeyID**: `type string`, `required`, specifies the alicloud account accessKeyID; it can also be declared by the environment variable `OSS_ACCESS_KEY_ID`.
+- **accessKeySecret**: `type string`, `required`, specifies the alicloud account accessKeySecret; it can also be declared by the environment variable `OSS_ACCESS_KEY_SECRET`.
+- **bucket**: `type string`, `required`, specifies the name of the alicloud oss bucket.
+- **prefix**: `type string`, `optional`, specifies the prefix under which the Workspace and Release files are stored, i.e. `${prefix}/workspaces` and `${prefix}/releases` respectively. Using a prefix creates a "dedicated space" for the Kusion data, which is beneficial for the management and reuse of the bucket. If not set, there is no prefix and the files are stored in the root path of the bucket (by analogy to a file system).
+
+Note that `accessKeyID` and `accessKeySecret` are required in the effective configuration, which combines the configuration managed by the command `kusion config` and the environment variables; for `kusion config` alone, they are not obligatory. For safety reasons, using environment variables is the recommended way.
+
+The whole oss type backend configuration is as below.
+
+```yaml
+{
+    "type": "oss",
+    "configs": {
+        "endpoint": "${oss_endpoint}",                  # type string, required, the oss endpoint.
+        "accessKeyID": "${oss_access_key_id}",          # type string, optional for the command "kusion config", the oss access key id.
+        "accessKeySecret": "${oss_access_key_secret}",  # type string, optional for the command "kusion config", the oss access key secret.
+        "bucket": "${oss_bucket}",                      # type string, required, the oss bucket.
+        "prefix": "${oss_prefix}"                       # type string, optional, the prefix to store the files.
+    }
+}
+```
+
+The supported environment variables are as below.
+
+```bash
+export OSS_ACCESS_KEY_ID="${oss-access-key-id}"         # configure accessKeyID
+export OSS_ACCESS_KEY_SECRET="${oss-access-key-secret}" # configure accessKeySecret
+```
+
+### s3
+
+The `s3` type backend uses the AWS Simple Storage Service (S3) as storage. The supported config items are as below.
+
+- **region**: `type string`, `required`, specifies the region of the aws s3 bucket; it can also be declared by the environment variable `AWS_DEFAULT_REGION` or `AWS_REGION`, where the latter has higher priority.
+- **endpoint**: `type string`, `optional`, specifies the access endpoint of the aws s3 bucket.
+- **accessKeyID**: `type string`, `required`, specifies the aws account accessKeyID; it can also be declared by the environment variable `AWS_ACCESS_KEY_ID`.
+- **accessKeySecret**: `type string`, `required`, specifies the aws account accessKeySecret; it can also be declared by the environment variable `AWS_SECRET_ACCESS_KEY`.
+- **bucket**: `type string`, `required`, specifies the name of the aws s3 bucket.
+- **prefix**: `type string`, `optional`, specifies the prefix under which the Workspace and Release files are stored, i.e. `${prefix}/workspaces` and `${prefix}/releases` respectively.
+
+Note that `region`, `accessKeyID` and `accessKeySecret` are optional for the `kusion config` command.
+
+The whole s3 type backend configuration is as below.
+
+```yaml
+{
+    "type": "s3",
+    "configs": {
+        "region": "${s3_region}",                       # type string, optional for the command "kusion config", the aws region.
+        "endpoint": "${s3_endpoint}",                   # type string, optional, the aws endpoint.
+        "accessKeyID": "${s3_access_key_id}",           # type string, optional for the command "kusion config", the aws access key id.
+        "accessKeySecret": "${s3_access_key_secret}",   # type string, optional for the command "kusion config", the aws access key secret.
+        "bucket": "${s3_bucket}",                       # type string, required, the s3 bucket.
+        "prefix": "${s3_prefix}"                        # type string, optional, the prefix to store the files.
+    }
+}
+```
+
+The supported environment variables are as below.
+
+```bash
+export AWS_DEFAULT_REGION="${s3_region}"                # configure region, lower priority than AWS_REGION
+export AWS_REGION="${s3_region}"                        # configure region, higher priority than AWS_DEFAULT_REGION
+export AWS_ACCESS_KEY_ID="${s3_access_key_id}"          # configure accessKeyID
+export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}"  # configure accessKeySecret
+```
+
+
+## Setting a Backend
+
+When there is a new backend or the backend configuration needs to be updated, use the command `kusion config set ${key} ${value}` to set a backend. A backend is identified by a unique name, and its whole configuration is made up of the backend type and its corresponding config items.
+
+Be careful not to confuse a backend with a backend type. For example, for a backend named `s3_prod` that uses `s3` as its storage, `s3_prod` is the backend, while `s3` is the backend type.
+
+There are four configuration modes:
+
+- setting a whole backend
+- setting a backend type
+- setting a whole set of backend config items
+- setting a backend config item
+
+A unique backend name is required for the configuration. The following takes an `s3` type backend named `s3_prod` as an example to explain how these modes work.
+
+### Setting a Whole Backend
+
+The key to configure a whole backend is `backends.${name}`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. Enclosing the value in single quotation marks is a good choice to keep the format intact.
+
+```shell
+# set a whole backend
+kusion config set backends.s3_prod '{"type":"s3","configs":{"bucket":"kusion"}}'
+```
+
+### Setting a Backend Type
+
+The key to set a backend type is `backends.${name}.type`, whose value must be `local`, `oss` or `s3`.
+
+```shell
+# set a backend type
+kusion config set backends.s3_prod.type s3
+```
+
+### Setting a Whole Set of Backend Config Items
+
+The key to set a whole set of backend config items is `backends.${name}.configs`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. The backend config must be set after the backend type and must correspond to it.
+
+```shell
+# set a whole backend config
+kusion config set backends.s3_prod.configs '{"bucket":"kusion"}'
+```
+
+### Setting a Backend Config Item
+
+The key to set a backend config item is `backends.${name}.configs.${item}`. The item name and value type both depend on the backend type. Like the whole backend config, the config item must be valid and set after the backend type.
+
+```shell
+# set a backend config item
+kusion config set backends.s3_prod.configs.bucket kusion
+```
+
+When executing `kusion config set`, the configuration will be stored in a local file. For security reasons, environment variables are supported to configure some sensitive config items, such as `password`, `accessKeyID` and `accessKeySecret`. Using environment variables rather than `kusion config set` for sensitive data is the best practice. If both are configured, the environment variables take priority.
For details about the supported environment variables, please see above.
+
+Kusion has a default backend named `default`, whose type is `local` and whose path is `$KUSION_HOME`. The `default` backend cannot be modified; that is, setting or unsetting the default backend is not allowed. Besides, the keyword `current` is also used by Kusion itself, so please do not use it as a backend name.
+
+## Unsetting a Backend
+
+When a backend is not in use, or its configuration is out of date, use the command `kusion config unset ${key}` to unset a backend or a specified config item. As with setting, there are also four modes of unsetting:
+
+- unsetting a whole backend
+- unsetting a backend type
+- unsetting a whole set of backend config items
+- unsetting a backend config item
+
+When unsetting a whole backend, the backend must not be the current backend. When unsetting the backend type, the config items must be empty and the backend must not be the current one.
+
+Unsetting the `default` backend is forbidden.
+
+## Setting the Current Backend
+
+To avoid specifying the backend for every operation command, Kusion provides a mechanism to set the current backend, which is then used by default. This is very useful when you execute a series of Kusion operation commands, for they usually use the same backend.
+
+Use the command `kusion config set backends.current ${name}` to set the current backend, where `name` must refer to an already configured backend.
+
+```shell
+# set the current backend
+kusion config set backends.current s3_prod
+```
+
+Setting the current backend to `default` is legal. Actually, if there is no backend related configuration, the current backend is the `default` backend.
+
+## Getting Backend Configuration
+
+Use the command `kusion config get ${key}` to get a whole backend configuration or a specified backend config item. The `key` is the same as in the setting and unsetting operations; the whole list can be found in [Configuration](configuration).
+
+```shell
+# get a whole backend
+kusion config get backends.s3_prod
+
+# get a specified config item
+kusion config get backends.s3_prod.configs.bucket
+```
+
+Besides, the command `kusion config list` can also be used; it returns the whole Kusion configuration, which includes the backend configuration.
+
+```shell
+# get the whole Kusion configuration
+kusion config list
+```
+
+## Using Backend
+
+The backend is used to store Workspaces and Releases. Thus, the following commands use the backend:
+
+- the subcommands of `kusion workspace`: used to store the Workspace;
+- `kusion apply`, `kusion destroy`: used to store the Release.
+
+For all the commands above, the `--backend` flag is provided to specify the backend; otherwise the current backend is used. When using a backend, you usually need to provide the sensitive data via environment variables. An example is shown below.
+
+```shell
+# set environment variables of sensitive and other necessary data
+export AWS_REGION="${s3_region}"
+export AWS_ACCESS_KEY_ID="${s3_access_key_id}"
+export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}"
+
+# use the current backend
+kusion apply
+
+# use a specified backend
+kusion apply --backend s3_prod
+```
diff --git a/docs_versioned_docs/version-v0.12/3-concepts/8-configuration.md b/docs_versioned_docs/version-v0.12/3-concepts/8-configuration.md
new file mode 100644
index 00000000..2ad72e6b
--- /dev/null
+++ b/docs_versioned_docs/version-v0.12/3-concepts/8-configuration.md
@@ -0,0 +1,114 @@
+---
+id: configuration
+sidebar_label: Configuration
+---
+
+# Configuration
+
+Kusion can be configured with some global settings, which are separate from the AppConfiguration written by the application developers and the workspace configurations written by the platform engineers.
+
+These configurations are only relevant to Kusion itself and can be managed with the command `kusion config`. The configuration items are predefined and use a hierarchical format with a full stop as the separator, such as `backends.current`. For now, only the backend configurations are included.
+
+The configuration is stored in the file `${KUSION_HOME}/config.yaml`. For sensitive data, such as passwords, access key IDs and secrets, setting them in the configuration file is not recommended; using the corresponding environment variables is safer.
+
+## Configuration Management
+
+Kusion provides the command `kusion config`, and its sub-commands `get`, `list`, `set` and `unset`, to manage the configuration. The usages are shown below:
+
+### Get a Specified Configuration Item
+
+Use `kusion config get` to get the value of a specified configuration item; only registered items can be obtained correctly. An example is shown below.
+
+```shell
+# get a configuration item
+kusion config get backends.current
+```
+
+### List the Configuration Items
+
+Use `kusion config list` to list all the Kusion configurations, where the result is in YAML format. An example is shown below.
+
+```shell
+# list all the Kusion configurations
+kusion config list
+```
+
+### Set a Specified Configuration Item
+
+Use `kusion config set` to set the value of a specified configuration item, where the type of the value is also fixed. Kusion supports `string`, `int`, `bool`, `array` and `map` as value types, which should be conveyed through the CLI in the following formats:
+
+- `string`: the original format, such as `local-dev`, `oss-pre`;
+- `int`: converted to string, such as `3306`, `80`;
+- `bool`: converted to string, only `true` and `false` are supported;
+- `array`: converted to string with JSON marshal, such as `'["s3","oss"]'`. To preserve the format, enclosing the string content in single quotes is a good idea, or there may be unexpected errors;
+- `map`: converted to string with JSON marshal, such as `'{"path":"/etc"}'`.
+
+Besides the type, some configuration items have additional setting requirements. Dependencies may exist between configuration items, that is, a configuration item must be set after another item. There may also be further restrictions on the configuration values themselves, for example the valid keys of a map value, or the allowed range of an int value. For detailed configuration item information, please refer to the following content of this article.
+
+Examples of setting configuration items are shown below.
+
+```shell
+# set a configuration item of type string
+kusion config set backends.pre.type s3
+
+# set a configuration item of type map
+kusion config set backends.prod '{"configs":{"bucket":"kusion"},"type":"s3"}'
+```
+
+### Unset a Specified Configuration Item
+
+Use `kusion config unset` to unset a specified configuration item. Note that some items have dependencies, which must be unset in the correct order. An example is shown below.
+
+```shell
+# unset a specified configuration item
+kusion config unset backends.pre
+```
+
+## Backend Configurations
+
+The backend configurations define the place to store the Workspace, Spec and State files. Multiple backends, as well as the current backend, can be set.
+
+### Available Configuration Items
+
+- **backends.current**: type `string`, the name of the currently used backend. It can be set to any configured backend name. If not set, the default local backend will be used.
+- **backends.${name}**: type `map`, a whole backend configuration, containing the type and config items, whose format is as below. It can be unset when the backend is not the current one.
+```yaml
+{
+    "type": "${backend_type}",      # type string, required, support local, oss, s3.
+    "configs": ${backend_configs}   # type map, optional for type local, required for the others, the specific keys depend on the type, refer to the description of backends.${name}.configs.
+}
+```
+- **backends.${name}.type**: type `string`, the backend type, supporting `local`, `s3` and `oss`. It can be unset when the backend is not the current one and the corresponding `backends.${name}.configs` are empty.
+- **backends.${name}.configs**: type `map`, the backend config items, whose format depends on the backend type and is as below. It must be set after `backends.${name}.type`.
+```yaml
+# type local
+{
+    "path": "${local_path}"     # type string, optional, the directory to store the files. If not set, use the default path ${KUSION_HOME}.
+}
+
+# type oss
+{
+    "endpoint": "${oss_endpoint}",                  # type string, required, the oss endpoint.
+    "accessKeyID": "${oss_access_key_id}",          # type string, optional, the oss access key id, which can also be obtained by environment variable OSS_ACCESS_KEY_ID.
+    "accessKeySecret": "${oss_access_key_secret}",  # type string, optional, the oss access key secret, which can also be obtained by environment variable OSS_ACCESS_KEY_SECRET.
+    "bucket": "${oss_bucket}",                      # type string, required, the oss bucket.
+    "prefix": "${oss_prefix}"                       # type string, optional, the prefix to store the files.
+}
+
+# type s3
+{
+    "region": "${s3_region}",                       # type string, optional, the aws region, which can also be obtained by environment variables AWS_REGION and AWS_DEFAULT_REGION.
+    "endpoint": "${s3_endpoint}",                   # type string, optional, the aws endpoint.
+    "accessKeyID": "${s3_access_key_id}",           # type string, optional, the aws access key id, which can also be obtained by environment variable AWS_ACCESS_KEY_ID.
+    "accessKeySecret": "${s3_access_key_secret}",   # type string, optional, the aws access key secret, which can also be obtained by environment variable AWS_SECRET_ACCESS_KEY.
+    "bucket": "${s3_bucket}",                       # type string, required, the s3 bucket.
+    "prefix": "${s3_prefix}"                        # type string, optional, the prefix to store the files.
+}
+```
+- **backends.${name}.configs.path**: type `string`, the path of a local type backend. It must be set after `backends.${name}.type`, which must be `local`.
+- **backends.${name}.configs.endpoint**: type `string`, the endpoint of an oss or s3 type backend.
It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.accessKeyID**: type `string`, the access key id of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. For `oss`, it can be also obtained by environment variable `OSS_ACCESS_KEY_ID`; while for s3, it is `AWS_ACCESS_KEY_ID`. +- **backends.${name}.configs.accessKeySecret**: type `string`, the access key secret of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. For `oss`, it can be also obtained by environment variable `OSS_ACCESS_KEY_SECRET`; while for s3, it is `AWS_SECRET_ACCESS_KEY`. +- **backends.${name}.configs.bucket**: type `string`, the bucket of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.prefix**: type `string`, the prefix to store the files of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.region**: type `string`, the aws region of s3 type backend. It must be set after `backends.${name}.type` and which must be `s3`. It can be also obtained by environment variables `AWS_REGION` and `AWS_DEFAULT_REGION`, where the former is priority. diff --git a/docs_versioned_docs/version-v0.12/3-concepts/9-release.md b/docs_versioned_docs/version-v0.12/3-concepts/9-release.md new file mode 100644 index 00000000..fd96b5b0 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/9-release.md @@ -0,0 +1,22 @@ +--- +id: release +sidebar_label: Releases +--- + +# Release + +Release is used to indicate a single operation, triggered by `kusion apply` and `kusion destroy`, providing users with a more coherent and consistent operation experience with Kusion. Release also provides audit and rollback capabilities, which is currently under development. + +Every time `kusion apply` or `kusion destroy` is executed, it will trigger the generation of a `release` file. The combination of a `project` and `workspace` corresponds to a set of `release` files, which also relates to a set of the real application resources. The `release` file is stored in the same `backend` as the `workspace`, and the default path is `$HOME/.kusion/releases/$PROJECT_NAME/$WORKSPACE_NAME`, whose revision starts from 1 and increments. + +The release file contains the [Spec](./6-spec.md) and [State](./9-release.md#state) of an application, both of which are composed of `Resources`, representing the expected description from the configuration code and the actual state of the resources respectively. In addition, the release file also contains the information of creation and modification time, operation phase, and application metadata, etc. + +### State + +State is a record of an operation's result. It is a mapping between resources managed by Kusion and the actual infra resources. State is often used as a data source for three-way merge/diff in operations like `Apply` and `Preview`. + +State can be stored in many storage [backend](./7-backend.md) mediums like filesystems, S3, and OSS, etc. + +## Concurrency Control + +Release supports collaboration among multiple users and implements the concurrency control through operation `phase`. When the field of `phase` in the release file is not `succeeded` or `failed`, kusion will not be able to execute `kusion apply` or `kusion destroy` operation to the corresponding stack. 
For example, if a user unexpectedly exits during the `kusion apply` or `kusion destroy` process, the `phase` of the release file may be kept as `applying` or `destroying`. In this case, the user can use the command of `kusion release unlock` to unlock the release file for a specified application and workspace, setting the `phase` to `failed`. diff --git a/docs_versioned_docs/version-v0.12/3-concepts/_category_.json b/docs_versioned_docs/version-v0.12/3-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/3-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/1-overview.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/1-overview.md new file mode 100644 index 00000000..e7339ec9 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/1-overview.md @@ -0,0 +1,223 @@ +--- +id: overview +--- + +# Configuration File Overview + +Kusion consumes one or more declarative configuration files (written in KCL) that describe the application, and delivers intent to the target runtime including Kubernetes, clouds, or on-prem infrastructure. + +This documentation series walks you through the odds and ends of managing such configuration files. + +## Table of Content + +- [Configuration File Overview](#configuration-file-overview) + - [Table of Content](#table-of-content) + - [Directory Structure](#directory-structure) + - [AppConfiguration Model](#appconfiguration-model) + - [Authoring Configuration Files](#authoring-configuration-files) + - [Identifying KCL file](#identifying-kcl-file) + - [KCL Schemas and KAM](#kcl-schemas-and-kam) + - [Kusion Modules](#kusion-modules) + - [Import Statements](#import-statements) + - [Understanding kcl.mod](#understanding-kclmod) + - [Building Blocks](#building-blocks) + - [Instantiating an application](#instantiating-an-application) + - [Using `kusion init`](#using-kusion-init) + - [Using references](#using-references) + +## Directory Structure + +Kusion expects the configuration file to be placed in a certain directory structure because it might need some metadata (that is not stored in the application configuration itself) in order to proceed. + +:::info + +See [Project](../concepts/project/overview) and [Stack](../concepts/stack/overview) for more details about Project and Stack. +::: + +A sample multi-stack directory structure looks like the following: +``` +~/playground$ tree multi-stack-project/ +multi-stack-project/ +├── README.md +├── base +│   └── base.k +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +├── prod +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +In general, the directory structure follows a hierarchy where the top-level is the project configurations, and the sub-directories represent stack-level configurations. + +You may notice there is a `base` directory besides all the stacks. The `base` directory is not mandatory, but rather a place to store common configurations between different stacks. A common pattern we observed is to use stacks to represent different stages (dev, stage, prod, etc.) in the software development lifecycle, and/or different deployment targets (azure-eastus, aws-us-east-1, etc). A project can have as many stacks as needed. 
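+
+For reference, the `project.yaml` and `stack.yaml` files hold the project- and stack-level metadata that Kusion reads alongside the configuration code. As a rough, hypothetical sketch (the exact fields accepted may vary between Kusion versions — see the Project and Stack concept pages for the authoritative schema), they can be as small as a single `name` field:
+```yaml
+# project.yaml (hypothetical minimal example)
+name: multi-stack-project
+```
+```yaml
+# dev/stack.yaml (hypothetical minimal example)
+name: dev
+```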
+
+In practice, the applications deployed into dev and prod will very likely end up with a similar set of configurations, except for a few fields such as the application image (dev might be on newer versions), resource requirements (prod might require more resources), etc.
+
+As a general best practice, we recommend managing the common configurations in `base.k` as much as possible to minimize duplicate code. We will cover how overriding works in [Base and Override](base-override).
+
+## AppConfiguration Model
+
+`AppConfiguration` is the out-of-the-box model we built to describe an application. It serves as the declarative intent for a given application.
+
+The schema for `AppConfiguration` is defined in the [KusionStack/kam](https://github.com/KusionStack/kam/blob/main/v1/app_configuration.k) repository. It is designed as a unified, application-centric model that encapsulates the comprehensive configuration details while hiding the complexity of the infrastructure as much as possible.
+
+`AppConfiguration` consists of multiple sub-components that each represent either the application workload itself, its dependencies (in the form of [Kusion Modules](../concepts/module/overview)), relevant workflows, or operational expectations. We will deep dive into the details of how to author each of these elements in this upcoming documentation series.
+
+For more details on the `AppConfiguration`, please refer to the [design documentation](../concepts/app-configuration).
+
+## Authoring Configuration Files
+
+[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open-source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to providing better modularity, scalability, stability and extensibility.
+
+### Identifying KCL file
+
+KCL files are identified by the `.k` suffix in the filename.
+
+### KCL Schemas and KAM
+
+Similar to most modern General Programming Languages (GPLs), KCL provides packages that are used to organize collections of related KCL source files into modular and re-usable units.
+
+In the context of Kusion, we abstracted a core set of KCL Schemas (such as the aforementioned `AppConfiguration`, `Workload`, `Container`, etc.) that represent the concepts we believe are relatively universal and developer-friendly, also known as the [Kusion Application Model](https://github.com/KusionStack/kam), or KAM.
+
+### Kusion Modules
+
+To extend the capabilities beyond the core KAM model, we use a concept known as [Kusion Modules](../concepts/module/overview) to define components that best abstract the capabilities needed during application delivery. We provide a collection of official out-of-the-box Kusion Modules that represent the most common capabilities. They are maintained in [KusionStack's GitHub container registry](https://github.com/orgs/KusionStack/packages). When authoring an application configuration file, you can simply declare said Kusion Modules as dependencies and import them to declare the ship-time capabilities that the application requires.
+
+If the modules in the KusionStack container registry do not meet the needs of your applications, Kusion provides the necessary mechanisms to extend the set with custom-built Kusion Modules. You can always create and publish your own module, then import the new module in your application configuration written in KCL.
+ +For the steps to develop your own module, please refer to the Module developer guide. + +### Import Statements + +An example of the import looks like the following: +``` +### import from the official kam package +import kam.v1.app_configuration as ac + +### import kusion modules +import service +import service.container as c +import monitoring as m +import network as n +``` + +Take `import kam.v1.app_configuration as ac` as an example, the `.v1.app_configuration` part after `import kam` represents the relative path of a specific schema to import. In this case, the `AppConfiguration` schema is defined under `v1/app_configuration` directory in the `kam` package. + +### Understanding kcl.mod + +Much similar to the concept of `go.mod`, Kusion uses `kcl.mod` as the source of truth to manage metadata (such as package name, dependencies, etc.) for the current package. Kusion will also auto-generate a `kcl.mod.lock` as the dependency lock file. + +The most common usage for `kcl.mod` is to manage the dependency of your application configuration file. + +:::info + +Please note this `kcl.mod` will be automatically generated if you are using `kusion init` to initialize a project with a template. You will only need to modify this file if you are modifying the project metadata outside the initialization process, such as upgrading the dependency version or adding a new dependency altogether, etc. +:::info + +There are 3 sections in a `kcl.mod` file: +- `package`, representing the metadata for the current package. +- `dependencies`, describing the packages the current package depends on. Supports referencing either a git repository or an OCI artifact. +- `profile`, defining the behavior for Kusion. In the example below, it describes the list of files Kusion should look for when parsing the application configuration. + +An example of `kcl.mod`: +``` +[package] +name = "multi-stack-project" +edition = "0.5.0" +version = "0.1.0" + +[dependencies] +monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.1.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" } +# Uncomment the line below to use your own modified module +# my-module = { oci = "oci://ghcr.io/my-repository/my-package", tag = "my-version" } + +[profile] +entries = ["../base/base.k", "main.k"] +``` + +### Building Blocks + +Configuration files consist of building blocks that are made of instances of schemas. An `AppConfiguration` instance consists of several child schemas, most of which are optional. The only mandatory one is the `workload` instance. We will take a closer look in the [workload walkthrough](workload). The order of the building blocks does NOT matter. + +The major building blocks as of version `0.12.0`: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container {} + ... + } + secrets: {} + ... + } + # optional dependencies, usually expressed in kusion modules + accessories: { + ... + } + ... +} +``` + +We will deep dive into each one of the building blocks in this documentation series. + +### Instantiating an application + +In Kusion's out-of-the-box experience, an application is identified with an instance of `AppConfiguration`. You may have more than one application in the same project or stack. 
+ +Here's an example of a configuration that can be consumed by Kusion (assuming it is placed inside the proper directory structure that includes project and stack configurations, with a `kcl.mod` present): + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +gocity: ac.AppConfiguration { + workload: service.Service { + containers: { + "gocity": c.Container { + image = "howieyuen/gocity:latest" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +Don't worry about what `workload` or `n.Network` stand for at the moment. We will deep dive into each one of them in this upcoming documentation series. + +### Using `kusion init` + +Kusion offers a `kusion init` sub-command which initializes a new project using a pre-built template, which saves you from the hassle of manually building the aforementioned directory structure that Kusion expects. + +There is a built-in template `quickstart` in the Kusion binary that can be used offline. + +The pre-built templates are meant to help you get off the ground quickly with some simple out-of-the-box examples. You can refer to the [QuickStart documentation](../getting-started/deliver-quickstart) for some step-by-step tutorials. + +### Using references + +The reference documentation for the `kam` package and the official Kusion Modules is located in [Reference](../reference/modules/developer-schemas/app-configuration). + +If you are using them out of the box, the reference documentation provides a comprehensive view for each schema involved, including all the attribute names and description, their types, default value if any, and whether a particular attribute is required or not. There will also be an example attached to each schema reference. + +We will also deep dive into some common examples in the upcoming sections. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/2-kcl-basics.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/2-kcl-basics.md new file mode 100644 index 00000000..aaa80366 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/2-kcl-basics.md @@ -0,0 +1,144 @@ +--- +id: kcl-basics +--- + +# KCL Basics + +## Table of Content +- [Variable assignments](#variable-assignments) +- [Common built-in types](#common-built-in-types) +- [Lists and maps](#lists-and-maps) +- [Conditional statements](#conditional-statements) +- [The : and = operator](#the--and--operator) +- [Advanced KCL capabilities](#advanced-kcl-capabilities) + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +## Variable assignments + +There are two ways to initialize a variable in KCL. You can either use the `:` operator or the `=` operator. We will discuss the difference between them in [this section later](#the--and--operator). 
+
+Here are the two ways to create a variable and initialize it:
+```
+foo = "Foo" # Declare a variable named `foo` and its value is a string literal "Foo"
+bar: "Bar" # Declare a variable named `bar` and its value is a string literal "Bar"
+```
+
+You will be able to override a variable assignment via the `=` operator. We will discuss this in depth in the [`:` and `=` operator section](#the--and--operator).
+
+## Common built-in types
+
+KCL supports `int`, `float`, `bool` and `string` as the built-in types.
+
+Other types are defined in the packages that are imported into the application configuration files. One such example would be the `AppConfiguration` object (or the `Container`, `Probe`, `Port` objects, etc.) defined in the `kam` repository.
+
+## Lists and maps
+
+Lists are represented using the `[]` notation.
+An example of lists:
+```
+list0 = [1, 2, 3]
+list1 = [4, 5, 6]
+joined_list = list0 + list1 # [1, 2, 3, 4, 5, 6]
+```
+
+Maps are represented using the `{}` notation.
+An example of maps:
+```
+a = {"one" = 1, "two" = 2, "three" = 3}
+b = {'one' = 1, 'two' = 2, 'three' = 3}
+assert a == b # True
+assert len(a) == 3 # True
+```
+
+## Conditional statements
+
+You can also use basic control flow statements when writing the configuration file.
+
+An example that sets the value of `replicas` conditionally based on the value of `containers.myapp.resources.cpu`:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: ""
+                resources: {
+                    "cpu": "500m"
+                    "memory": "512Mi"
+                }
+            }
+        }
+        replicas: 1 if containers.myapp.resources.cpu == "500m" else 2
+    }
+}
+```
+
+For more details on KCL's control flow statements, please refer to the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#control-flow-statements).
+
+## The `:` and `=` operator
+
+You might have noticed there is a mixed usage of `:` and `=` in the samples above.
+
+:::info
+
+**TLDR: The recommendation is to use `:` in the common configurations, and `=` for overrides in the environment-specific configurations.**
+:::
+
+In KCL:
+- `:` represents a union-ed value assignment. In the pattern `identifier: E` or `identifier: T E`, the value of the expression `E` with optional type annotation `T` will be merged and union-ed into the element value.
+- `=` represents a value override. In the pattern `identifier = E` or `identifier = T E`, the value of the expression `E` with optional type annotation `T` will override the `identifier` attribute value.
+
+Let's take a look at an example:
+```
+# This is one configuration that will be merged.
+config: Config {
+    data.d1 = 1
+}
+# This is another configuration that will be merged.
+config: Config {
+    data.d2 = 2
+}
+```
+
+The above is equivalent to the snippet below, since the two expressions for `config` get merged/union-ed into one:
+```
+config: Config {
+    data.d1 = 1
+    data.d2 = 2
+}
+```
+
+whereas using the `=` operator will result in a different outcome:
+```
+# This is the first configuration.
+config = Config {
+    data.d1 = 1
+}
+# This is the second configuration, which will override the prior one.
+config = Config {
+    data.d2 = 2
+}
+```
+
+The config above results in:
+```
+config: Config {
+    data.d2 = 2
+}
+```
+
+Please note that the `:` attribute operator represents an idempotent merge operation, and an error will be thrown when the values that need to be merged conflict with each other. 
+ +``` +data0 = {id: 1} | {id: 2} # Error:conflicting values between {'id': 2} and {'id': 1} +data1 = {id: 1} | {id = 2} # Ok, the value of `data` is {"id": 2} +``` + +More about `:` and `=` operator can be found in the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#config-operations). + +## Advanced KCL capabilities + +For more advanced KCL capabilities, please visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/3-base-override.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/3-base-override.md new file mode 100644 index 00000000..f14af112 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/3-base-override.md @@ -0,0 +1,94 @@ +--- +id: base-override +--- + +# Base and Override + +In practice, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets, be it different environments in the SDLC, or different clouds, regions or runtimes for cost/regulation/performance or disaster recovery related reasons. + +In that context, we advocate for a pattern where you can leverage some Kusion and KCL features to minimize the amount of duplicate configurations, by separating the common base application configuration and environment-specific ones. + +:::info + +The file names in the below examples don't matter as long as they are called out and appear in the correct order in the `entries` field (the field is a list) in `kcl.mod`. The files with common configurations should appear first in the list and stack-specific ones last. The latter one takes precedence. + +The configurations also don't have be placed into a single `.k` file. For complex projects, they can be broken down into smaller organized `.k` files for better readability. +::: + +Base configuration defined in `base/base.k`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network.network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +Environment-specific configuration defined in `dev/main.k`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + # dev stack has different app configuration from the base + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } + } + replicas = 2 + } +} +``` + +Alternatively, you could locate a specific property (in this case below, the `Container` object) in the `AppConfiguration` object using the dot selector shorthand(such as `workload.containers.myapp` or `workload.replicas` below): +``` +import kam.v1.app_configuration as ac + +# main.k declares customized configurations for dev stack. 
+myapp: ac.AppConfiguration { + workload.replicas = 2 + workload.containers.myapp: { + # dev stack has different app configuration + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } +} +``` +This is especially useful when the application configuration is complex but the override is relatively straightforward. + +The two examples above are equivalent when overriding the base. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/4-workload.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/4-workload.md new file mode 100644 index 00000000..2b880df0 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/4-workload.md @@ -0,0 +1,373 @@ +# Workload + +The `workload` attribute in the `AppConfiguration` instance is used to describe the specification for the application workload. The application workload generally represents the computing component for the application. + +A `workload` maps to an `AppConfiguration` instance 1:1. If there are more than one workload, they should be considered different applications. + +## Table of Content +- [Import](#import) +- [Types of workloads](#types-of-workloads) +- [Configure containers](#configure-containers) + - [Application image](#application-image) + - [Resource Requirements](#resource-requirements) + - [Health Probes](#health-probes) + - [Lifecycle Hooks](#lifecycle-hooks) + - [Create Files](#create-files) + - [Customize container initialization](#customize-container-initialization) +- [Configure Replicas](#configure-replicas) +- [Differences between Service and Job](#differences-between-service-and-job) +- [Workload References](#workload-references) + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.container.probe as p +import service.container.lifecycle as lc +``` + +## Types of Workloads + +There are currently two types of workloads: + +- `Service`, representing a long-running, scalable workload type that should "never" go down and respond to short-lived latency-sensitive requests. This workload type is commonly used for web applications and services that expose APIs. +- `Job`, representing batch tasks that take from a few seconds to days to complete and then stop. These are commonly used for batch processing that is less sensitive to short-term performance fluctuations. + +To instantiate a `Service`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service {} +} +``` + +To instantiate a `Job`: +``` +import kam.v1.app_configuration as ac +import job +import job.container as c + +myapp: ac.AppConfiguration { + workload: job.Job {} +} +``` + +Of course, the `AppConfiguration` instances above is not sufficient to describe an application. We still need to provide more details in the `workload` section. + +## Configure containers + +Kusion is built on top of cloud-native philosophies. One of which is that applications should run as loosely coupled microservices on abstract and self-contained software units, such as containers. 
+ +The `containers` attribute in a workload instance is used to define the behavior for the containers that run application workload. The `containers` attribute is a map, from the name of the container to the `catalog.models.schema.v1.workload.container.Container` Object which includes the container configurations. + +:::info + +The name of the container is in the context of the configuration file, so you could refer to it later. It's not referring to the name of the container in the Kubernetes cluster (or any other runtime). +::: + +Everything defined in the `containers` attribute is considered an application container, as opposed to a sidecar container. Sidecar containers will be introduced in a different attribute in a future version. + +In most of the cases, only one application container is needed. Ideally, we recommend mapping an `AppConfiguration` instance to a microservice in the microservice terminology. + +We will walk through the details of configuring a container using an example of the `Service` type. + +To add an application container: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container {} + } + } +} +``` + +### Application image + +The `image` attribute in the `Container` schema specifies the application image to run. This is the only required field in the `Container` schema. + +To specify an application image: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + } + } +} +``` + +### Resource Requirements + +The `resources` attribute in the `Container` schema specifies the application resource requirements such as cpu and memory. + +You can specify an upper limit (which maps to resource limits only) or a range as the resource requirements (which maps to resource requests and limits in Kubernetes). + +To specify an upper bound (only resource limits): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # ... + } + } + } +} +``` + +To specify a range (both resource requests and limits): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # Sets requests to cpu=250m and memory=256Mi + # Sets limits to cpu=500m and memory=512Mi + resources: { + "cpu": "250m-500m" + "memory": "256Mi-512Mi" + } + # ... + } + } + } +} +``` + +### Health Probes + +There are three types of `Probe` defined in a `Container`: + +- `livenessProbe` - used to determine if the container is healthy and running +- `readinessProbe` - used to determine if the container is ready to accept traffic +- `startupProbe` - used to determine if the container has started properly. Liveness and readiness probes don't start until `startupProbe` succeeds. Commonly used for containers that takes a while to start + +The probes are optional. You can only have one Probe of each kind for a given `Container`. 
+ +To configure a `Http` type `readinessProbe` that probes the health via HTTP request and a `Exec` type `livenessProbe` which executes a command: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # Configure an Http type readiness probe at /healthz + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "/healthz" + } + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 15 + successThreshold: 3 + failureThreshold: 1 + } + # Configure an Exec type liveness probe that executes probe.sh + livenessProbe: p.Probe { + probeHandler: p.Exec { + command: ["probe.sh"] + } + initialDelaySeconds: 10 + } + } + } + } +} +``` + +### Lifecycle Hooks + +You can also configure lifecycle hooks that triggers in response to container lifecycle events such as liveness/startup probe failure, preemption, resource contention, etc. + +There are two types that is currently supported: + +- `PreStop` - triggers before the container is terminated. +- `PostStart` - triggers after the container is initialized. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # Configure lifecycle hooks + lifecycle: lc.Lifecycle { + # Configures an Exec type pre-stop hook that executes preStop.sh + preStop: p.Exec { + command: ["preStop.sh"] + } + # Configures an Http type pre-stop hook at /post-start + postStart: p.Http { + url: "/post-start" + } + } + } + } + } +} +``` + +### Create Files + +You can also create files on-demand during the container initialization. + +To create a custom file and mount it to `/home/admin/my-file` when the container starts: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + # Creates a file during container startup + files: { + "/home/admin/my-file": c.FileSpec { + content: "some file contents" + mode: "0777" + } + } + } + } +} +``` + +### Customize container initialization + +You can also customize the container entrypoint via `command`, `args`, and `workingDir`. These should **most likely not be required**. In most of the cases, the entrypoint details should be baked into the application image itself. + +To customize the container entrypoint: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # This command will overwrite the entrypoint set in the image Dockerfile + command: ["/usr/local/bin/my-init-script.sh"] + # Extra arguments append to command defined above + args: [ + "--log-dir=/home/my-app/logs" + "--timeout=60s" + ] + # Run the command as defined above, in the directory "/tmp" + workingDir: "/tmp" + } + } + } +} +``` + +## Configure Replicas + +The `replicas` field in the `workload` instance describes the number of identical copies to run at the same time. It is generally recommended to have multiple replicas in production environments to eliminate any single point of failure. 
In Kubernetes, this corresponds to the `spec.replicas` field in the relevant workload manifests. + +To configure a workload to have a replica count of 3: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + # ... + } + replicas: 3 + # ... + } + # ... +} +``` + +## Differences between Service and Job + +The two types of workloads, namely `Service` and `Job`, share a majority of the attributes with some minor differences. + +### Exposure + +A `Service` usually represents a long-running, scalable workload that responds to short-lived latency-sensitive requests and never go down. Hence, a `Service` has an additional attribute that determines how it is exposed and can be accessed. A `Job` does NOT have the option to be exposed. We will explore more in the [application networking walkthrough](networking). + +### Job Schedule + +A `Job` can be configured to run in a recurring manner. In this case, the job will have a cron-format schedule that represents its recurring schedule. + +To configure a job to run at 21:00 every night: +``` +import kam.v1.app_configuration as ac +import job +import job.container as c + +myjob: ac.AppConfiguration { + workload: job.Job { + containers: { + "busybox": c.Container { + image: "busybox:1.28" + # Run the following command as defined + command: ["/bin/sh", "-c", "echo hello"] + } + } + # Run every hour. + schedule: "0 * * * *" + } +} +``` + +## Workload References + +You can find workload references [here](../reference/modules/developer-schemas/workload/service). + +You can find workload schema source [here](https://github.com/KusionStack/catalog/tree/main/models/schema/v1/workload). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/5-networking.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/5-networking.md new file mode 100644 index 00000000..adaa9904 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/5-networking.md @@ -0,0 +1,174 @@ +--- +id: networking +--- + +# Application Networking + +In addition to configuring application's [container specifications](workload#configure-containers), you can also configure its networking behaviors, including how to expose the application and how it can be accessed. You can specify a `network` module in the `accessories` field in `AppConfiguration` to achieve that. + +In future versions, this will also include ingress-based routing strategy and DNS configurations. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `network` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +``` + +The `kcl.mod` must contain reference to the network module: +``` +#... + +[dependencies] +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +#... +``` + +## Private vs Public Access + +Private network access means the service can only be access from within the target cluster. + +Public access is implemented using public load balancers on the cloud. This generally requires a Kubernetes cluster that is running on the cloud with a vendor-specific service controller. 
+ +Any ports defined default to private access unless explicitly specified. + +To expose port 80 to be accessed privately: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +To expose port 80 to be accessed publicly: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +:::info +The CSP (Cloud Service Provider) used to provide load balancers is defined by platform engineers in workspace. +::: + +## Mapping ports + +To expose a port `80` that maps to a different port `8088` on the container: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + } + ] + } + } +} +``` + +## Exposing multiple ports + +You can also expose multiple ports and configure them separately. + +To expose port 80 to be accessed publicly, and port 9099 for private access (to be scraped by Prometheus, for example): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + n.Port { + port: 9099 + } + ] + } + } +} +``` + +## Choosing protocol + +To expose a port using the `UDP` protocol: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + protocol: "UDP" + } + ] + } + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/6-database.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/6-database.md new file mode 100644 index 00000000..6a8dedab --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/6-database.md @@ -0,0 +1,467 @@ +--- +id: databse +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Managed Databases + +You could also specify a database needed for the application. That can be achieved via a `mysql` or a `postgres` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. + +You can currently have several databases with **different database names** for an application at the same time. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `mysql` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](./1-overview.md#configuration-file-overview). 
+ +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import mysql +import postgres +``` + +The `kcl.mod` must contain reference to the `mysql` module or `postgres` module: +``` +#... + +[dependencies] +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +postgres = { oci = "oci://ghcr.io/kusionstack/postgres", tag = "0.2.0" } +#... +``` + +## Types of Database offerings + +As of version 0.11.0, Kusion supports the following database offerings on the cloud: +- MySQL and PostgreSQL Relational Database Service (RDS) on [AWS](https://aws.amazon.com/rds/) +- MySQL and PostgreSQL Relational Database Service (RDS) on [AliCloud](https://www.alibabacloud.com/product/databases) + +More database types on more cloud vendors will be added in the future. + +Alternatively, Kusion also supports creating a database at `localhost` for local testing needs. A local database is quicker to stand up and easier to manage. It also eliminates the need for an account and any relevant costs with the cloud providers in the case that a local testing environment is sufficient. + +:::info +You do need a local Kubernetes cluster to run the local database workloads. You can refer to [Minikube](https://minikube.sigs.k8s.io/docs/start/) or [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) to get started. +To see an end-to-end use case for standing up a local testing environment including a local database, please refer to the [Kusion Quickstart](../2-getting-started/2-deliver-quickstart.md). +::: + +## Cloud Credentials and Permissions + +Kusion provisions databases on the cloud via [terraform](https://www.terraform.io/) providers. For it to create _any_ cloud resources, it requires a set of credentials that belongs to an account that has the appropriate write access so the terraform provider can be initialized properly. + +For AWS, the environment variables needed: +``` +export AWS_REGION=us-east-1 # replace it with your region +export AWS_ACCESS_KEY_ID="xxxxxxxxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="xxxxxxx" # replace it with your SecretKey +``` + +For AliCloud, the environment variables needed: +``` +export ALICLOUD_REGION=cn-shanghai # replace it with your region +export ALICLOUD_ACCESS_KEY="xxxxxxxxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="xxxxxxxxx" # replace it with your SecretKey +``` + +The user account that owns these credentials would need to have the proper permission policies attached to create databases and security groups. If you are using the cloud-managed policies, the policies needed to provision a database and configure firewall rules are listed below. + +For AWS: +- `AmazonVPCFullAccess` for creating and managing database firewall rules via security group +- `AmazonRDSFullAccess` for creating and managing RDS instances + +For AliCloud: +- `AliyunVPCFullAccess` for creating and managing database firewall rules via security group +- `AliyunRDSFullAccess` for creating and managing RDS instances + +Alternatively, you can use customer managed policies if the cloud provider built-in policies don't meet your needs. The list of permissions needed are in the [AmazonRDSFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonRDSFullAccess.html#AmazonRDSFullAccess-json) and [AmazonVPCFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonVPCFullAccess.html). 
It will most likely be a subset of the permissions in the policy documents. + +## Configure Database + +### Provision a Cloud Database + +Assuming the steps in the [Cloud Credentials and Permissions](#cloud-credentials-and-permissions) section is setup properly, you can now provision cloud databases via Kusion. + +#### AWS RDS Instance +To provision an AWS RDS instance with MySQL v8.0 or PostgreSQL v14.0, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# MySQL configurations for AWS RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# PostgreSQL configurations for AWS RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +It's highly recommended to replace `0.0.0.0/0` and closely manage the whitelist of IPs that can access the database for security purposes. The `0.0.0.0/0` in the example above or if `securityIPs` is omitted altogether will allow connections from anywhere which would typically be a security bad practice. + +The `instanceType` field determines the computation and memory capacity of the RDS instance. The `db.t3.micro` instance type in the example above represents the `db.t3` instance class with a size of `micro`. In the same `db.t3` instance family there are also `db.t3.small`, `db.t3.medium`, `db.t3.2xlarge`, etc. + +The full list of supported `instanceType` values can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Support). + +You can also adjust the storage capacity for the database instance by changing the `size` field which is storage size measured in gigabytes. The minimum is 20. More details can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#Concepts.Storage.GeneralSSD). + +#### AliCloud RDS Instance + +To provision an Alicloud RDS instance with MySQL or PostgreSQL, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. 
Note that AliCloud RDS has several additional fields such as `category`, `subnetID` and `privateRouting`: + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# MySQL configurations for Alicloud RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# PostgreSQL configurations for Alicloud RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +We will walkthrough `subnetID` and `privateRouting` in the [Configure Network Access](#configure-network-access) section. + +The full list of supported `instanceType` values can be found in: +- [MySQL instance types(x86)](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/primary-apsaradb-rds-for-mysql-instance-types#concept-2096487) +- [PostgreSQL instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-postgresql/primary-apsaradb-rds-for-postgresql-instance-types#concept-2096578) + +### Local Database + +To deploy a local database with MySQL v8.0 or PostgreSQL v14.0: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +## Database Credentials + +There is no need to manage the database credentials manually. Kusion will automatically generate a random password, set it as the credential when creating the database, and then inject the hostname, username and password into the application runtime. + +You have the option to BYO (Bring Your Own) username for the database credential by specifying the `username` attribute in the `workspace.yaml`: +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + # ... + username: "my_username" +``` + +You **cannot** bring your own password. The password will always be managed by Kusion automatically. + +The database credentials are injected into the environment variables of the application container. 
You can access them via the following env vars: +``` +# env | grep KUSION_DB +KUSION_DB_HOST_WORDPRESS_MYSQL=wordpress.xxxxxxxx.us-east-1.rds.amazonaws.com +KUSION_DB_USERNAME_WORDPRESS_MYSQL=xxxxxxxxx +KUSION_DB_PASSWORD_WORDPRESS_MYSQL=xxxxxxxxx +``` + +:::info +More details about the environment of database credentials injected by Kusion can be found at [mysql credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) and [postgres credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/postgres.md#credentials-and-connectivity) +::: + +You can use these environment variables out of the box. Or most likely, your application might retrieve the connection details from a different set of environment variables. In that case, you can map the kusion environment variables to the ones expected by your application using the `$()` expression. + +This example below will assign the value of `KUSION_DB_HOST_WORDPRESS_MYSQL` into `WORDPRESS_DB_HOST`, `KUSION_DB_USERNAME_WORDPRESS_MYSQL` into `WORDPRESS_DB_USER`, likewise for `KUSION_DB_PASSWORD_WORDPRESS_MYSQL` and `WORDPRESS_DB_PASSWORD`: +``` +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3-apache" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + } + # ... + } + } + # ... + } + accessories: { + # ... + } +} +``` + +## Configure Network Access + +You can also optionally configure the network access to the database as part of the `AppConfiguration`. This is highly recommended because it dramatically increases the security posture of your cloud environment in the means of least privilege principle. + +The `securityIPs` field in the `Database` schema declares the list of network addresses that are allowed to access the database. The network addresses are in the [CIDR notation](https://aws.amazon.com/what-is/cidr/) and can be either a private IP range ([RFC-1918](https://datatracker.ietf.org/doc/html/rfc1918) and [RFC-6598](https://datatracker.ietf.org/doc/html/rfc6598) address) or a public one. + +If the database need to be accessed from a public location (which should most likely not be the case in a production environment), `securityIPs` need to include the public IP address of the traffic source (For instance, if the RDS database needs to be accessed from your computer). + +To configure AWS RDS to restrict network access from a VPC with a CIDR of `10.0.1.0/24` and a public IP of `103.192.227.125`: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + # ... + securityIPs: + - "10.0.1.0/24" + - "103.192.227.125/32" +``` + +Depending on the cloud provider, the default behavior of the database firewall settings may differ if omitted. + +### Subnet ID + +On AWS, you have the option to launch the RDS instance inside a specific VPC if a `subnetID` is present in the application configuration. By default, if `subnetID` is not provided, the RDS will be created in the default VPC for that account. However, the recommendation is to self-manage your VPCs to provider better isolation from a network security perspective. + +On AliCloud, the `subnetID` is required. The concept of subnet maps to VSwitch in AliCloud. 
+ +To place the RDS instance into a specific VPC on Alicloud: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + # ... + subnetID: "subnet-xxxxxxxxxxxxxxxx" +``` + +### Private Routing + +There is an option to enforce private routing on certain cloud providers if both the workload and the database are running on the cloud. + +On AliCloud, you can set the `privateRouting` flag to `True`. The database host generated will be a private FQDN that is only resolvable and accessible from within the AliCloud VPCs. Setting `privateRouting` flag to `True` when `type` is `aws` is a no-op. + +To enforce private routing on AliCloud: + +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + # ... + privateRouting: true +``` + +Kusion will then generate a private FQDN and inject it into the application runtime as the environment variable `KUSION_DB_HOST_` for the application to use. A complete list of Kusion-managed environment variables for mysql database can be found [here](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity). + +Otherwise when using the public FQDN to connect to a database from the workload, the route will depend on cloud provider's routing preference. The options are generally either: +- Travel as far as possible on the cloud provider's global backbone network, or also referred to as cold potato routing, or +- Egress as early as possible to the public Internet and re-enter the cloud provider's datacenter later, or also referred to as hot potato routing + +The prior generally has better performance but is also more expensive. + +You can find a good read on the [AWS Blog](https://aws.amazon.com/blogs/architecture/internet-routing-and-traffic-engineering/) or the [Microsoft Learn](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/routing-preference-overview). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/7-secret.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/7-secret.md new file mode 100644 index 00000000..db1d576e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/7-secret.md @@ -0,0 +1,251 @@ +--- +id: secret +--- + +# Secrets + +Secrets are used to store sensitive data like passwords, API keys, TLS certificates, tokens, or other credentials. Kusion provides multiple secret types, and makes it easy to be consumed in containers. + +For application dependent cloud resources that are managed by Kusion, their credentials are automatically managed by Kusion (generated and injected into application runtime environment variable). You shouldn't have to manually create those. + +## Using secrets in workload + +Secrets must be defined in AppConfiguration. The values can be generated by Kusion or reference existing secrets stored in third-party vault. Secrets can be consumed in containers by referencing them through the `secret:///` URI syntax. + +### Consume secret in an environment variable + +You can consume the data in Secrets as environment variable in your container. For example the db container uses an environment variable to set the root password. 
+ +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampledb: ac.AppConfiguration { + workload: service.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume db-root-password secret in environment + "ROOT_PASSWORD": "secret://db-root-password/token" + } + } + } + # Secrets used to generate token + secrets: { + "init-info": sec.Secret { + type: "token" + } + } + } +} +``` + +The example shows the secret `root-password` being consumed as an environment variable in the db container. The secret is of type token and will automatically be generated at runtime by Kusion. + +### Consume all secret keys as environment variables + +Sometimes your secret contains multiple data that need to be consumed as environment variables. The example below shows how to consume all the values in a secret as environment variables named after the keys. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampledb: ac.AppConfiguration { + workload: service.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume all init-info secret keys as environment variables + "secret://init-info": "" + } + } + } + # Secrets used to init mysql instance + secrets: { + "init-info": sec.Secret { + type: "opaque" + data: { + "ROOT_PASSWORD": "admin" + } + } + } + } +} +``` + +This will set the environment variable "ROOT_PASSWORD" to the value "admin" in the db container. + +## Types of secrets + +Kusion provides multiple types of secrets to application developers. + +1. Basic: Used to generate and/or store usernames and passwords. +2. Token: Used to generate and/or store secret strings for password. +3. Opaque: A generic secret that can store arbitrary user-defined data. +4. Certificate: Used to store a certificate and its associated key that are typically used for TLS. +5. External: Used to retrieve secret form third-party vault. + +### Basic secrets + +Basic secrets are defined in the secrets block with the type "basic". + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "auth-info": sec.Secret { + type: "basic" + data: { + "username": "admin" + "password": "******" + } + } + } + } +} +``` + +The basic secret type is typically used for basic authentication. The key names must be username and password. If one or both of the fields are defined with a non-empty string, those values will be used. If the empty string, the default value, is used Acorn will generate random values for one or both. + +### Token secrets + +Token secrets are useful for generating a password or secure string used for passwords when the user is already known or not required. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "api-token": sec.Secret { + type: "token" + data: { + "token": "" + } + } + } + } +} +``` + +The token secret type must be defined. The `token` field in the data object is optional and if left empty Kusion will generate the token, which is 54 characters in length by default. If the `token` is defined that value will always be used. 
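+
+Putting the two pieces together, the generated token can be consumed from a container exactly like the earlier examples on this page, via the `secret://` reference. The sketch below simply combines the patterns already shown; the container name and image are hypothetical placeholders.
+
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import service.secret as sec
+
+sampleapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "app": c.Container {
+                # hypothetical image, replace with your own
+                image: "my-app:latest"
+                env: {
+                    # Inject the Kusion-generated token into the container
+                    "API_TOKEN": "secret://api-token/token"
+                }
+            }
+        }
+        secrets: {
+            "api-token": sec.Secret {
+                type: "token"
+            }
+        }
+    }
+}
+```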
+ +### Opaque secrets + +Opaque secrets have no defined structure and can have arbitrary key value pairs. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "my-secret": sec.Secret { + type: "opaque" + } + } + } +} +``` + +### Certificate secrets + +Certificate secrets are useful for storing a certificate and its associated key. One common use for TLS Secrets is to configure encryption in transit for an Ingress, but you can also use it with other resources or directly in your workload. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "server-cert": sec.Secret { + type: "certificate" + data: { + # Please do not put private keys in configuration files + "tls.crt": "The cert file content" + "tls.key": "The key file content" + } + } + } + } +} +``` + +### External secrets + +As a general principle, storing secrets in a plain text configuration file is highly discouraged, keeping secrets outside of Git is especially important for future-proofing, even encrypted secrets are not recommended to check into Git. The most common approach is to store secrets in a third-party vault (such as Hashicorp Vault, AWS Secrets Manager and Azure Key Vault, etc) and retrieve the secret in the runtime only. External secrets are used to retrieve sensitive data from external secret store to make it easy to be consumed in containers. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "api-access-token": sec.Secret { + type: "external" + data: { + # Please do not put private keys in configuration files + "accessToken": "ref://api-auth-info/accessToken?version=1" + } + } + } + } +} +``` + +The value field in data object follow `ref://PATH[?version=]` URI syntax. `PATH` is the provider-specific path for the secret to be retried. Kusion provides out-of-the-box integration with `Hashicorp Vault`, `AWS Secrets Manager`, `Azure Key Vault` and `Alicloud Secrets Manager`. + +## Immutable secrets + +You can also declare a secret as immutable to prevent it from being changed accidentally. + +To declare a secret as immutable: + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "my-secret": sec.Secret { + # ... + immutable: True + } + } + } +} +``` + +You can change a secret from mutable to immutable but not the other way around. That is because the Kubelet will stop watching secrets that are immutable. As the name suggests, you can only delete and re-create immutable secrets but you cannot change them. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/8-monitoring.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/8-monitoring.md new file mode 100644 index 00000000..13e430f3 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/8-monitoring.md @@ -0,0 +1,102 @@ +# Application Monitoring + +You could also specify the collection of monitoring requirements for the application. 
That can be achieved via a `monitoring` module (or a bring-your-own module) in the `accessories` field of `AppConfiguration`. + +As of version 0.11.0, Kusion supports integration with Prometheus by managing scraping behaviors in the configuration file. + +:::info + +For the monitoring configuration to work (more specifically, to be consumed by Prometheus), the target cluster needs to have Prometheus installed correctly, either as a Kubernetes operator or as a server/agent. + +More about how to set up Prometheus can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus). +::: + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `monitoring` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import monitoring as m +``` + +## Workspace configurations + +In addition to the KCL configuration file, there are also workspace-level configurations that should be set first. In an ideal scenario, this step is done by the platform engineers. + +In the event that they do not exist for you or your organization, e.g. if you are an individual developer, you can either do it yourself or use the [default values](#default-values) provided by the KusionStack team. The steps to do this yourself can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus#setting-up-workspace-configs). + +:::info + +For more details on how workspaces work, please refer to the [workspace concept](../3-concepts/4-workspace.md) +::: + +By separating the configurations that developers are interested in from those that platform owners are interested in, we can reduce the cognitive complexity of the application configuration and achieve separation of concerns. + +You can append the following YAML file to your own workspace configurations and update the corresponding workspace with the `kusion workspace update` command. + +```yaml +modules: + kusionstack/monitoring@v0.1.0: + default: + interval: 30s + monitorType: Pod + operatorMode: true + scheme: http + timeout: 15s +``` + +## Managing Scraping Configuration +To manage the scrape configuration for the application: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/metrics" + port: "web" + } + } +} +``` + +The example above will instruct the Prometheus job to scrape metrics from the `/metrics` endpoint of the application on the port named `web`. + +To instruct Prometheus to scrape from `/actuator/metrics` on port `9099` instead: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/actuator/metrics" + port: "9099" + } + } +} +``` + +Note that numbered ports only work when your Prometheus is not running as an operator. + +Neither `path` nor `port` is a required field if Prometheus runs as an operator. If omitted, `path` defaults to `/metrics`, and `port` defaults to the container port or service port, depending on which resource is being monitored. If Prometheus does not run as an operator, both fields are required. A minimal operator-mode configuration that relies on these defaults is sketched below.
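+When Prometheus runs as an operator, a configuration that relies entirely on those defaults could look like this (a sketch; `myapp` and the omitted workload details are illustrative):
+
+```
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        # ...
+    }
+    # With operator mode enabled in the workspace, path and port can be omitted;
+    # they fall back to /metrics and the monitored container or service port.
+    accessories: {
+        "monitoring": m.Prometheus {}
+    }
+}
+```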
+ +Scraping scheme, interval and timeout are considered platform-managed configurations and are therefore managed as part of the [workspace configurations](../user-guides/observability/prometheus#setting-up-workspace-configs). + +More details about how the Prometheus integration works can be found in the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md). + +## Default values + +If no workspace configurations are found, the default values provided by the KusionStack team are: +- Scraping interval defaults to 30 seconds +- Scraping timeout defaults to 15 seconds +- Scraping scheme defaults to http +- Defaults to NOT running as an operator + +If any of the default values does not meet your need, you can change them by [setting up the workspace configuration](../user-guides/observability/prometheus#setting-up-workspace-configs). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/9-operational-rules.md b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/9-operational-rules.md new file mode 100644 index 00000000..674d2f2c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/9-operational-rules.md @@ -0,0 +1,54 @@ +--- +id: operational-rules +--- + +# Operational Rules + +You could also specify the collection of operational rule requirements for the application. That can be achieved via a `opsrule` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. Operational rules are used as a preemptive measure to police and stop any unwanted changes. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `opsrule` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import opsrule as o +``` + +## Max Unavailable Replicas + +Currently, the `opsrule` module supports setting a `maxUnavailable` parameter, which specifies the maximum number of pods that can be rendered unavailable at any time. It can be either a fraction of the total pods for the current application or a fixed number. This operational rule is particularly helpful against unexpected changes or deletes to the workloads. It can also prevent too many workloads from going down during an application upgrade. + +More rules will be available in future versions of Kusion. + +To set `maxUnavailable` to a percentage of pods: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + # ... + } + } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + +To set `maxUnavailable` to a fixed number of pods: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... 
+ } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: 2 + } + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/_category_.json b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/_category_.json new file mode 100644 index 00000000..64d45678 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/4-configuration-walkthrough/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Configuration Walkthrough" +} diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/1-database.md b/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/1-database.md new file mode 100644 index 00000000..497c3ab4 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/1-database.md @@ -0,0 +1,305 @@ +--- +id: database +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deliver the WordPress Application with Cloud RDS + +This tutorial will demonstrate how to deploy a WordPress application with Kusion, which relies on both Kubernetes and IaaS resources provided by cloud vendors. We can learn how to declare the Relational Database Service (RDS) to provide a cloud-based database solution with Kusion for our application from this article. + +## Prerequisites + +- Install [Kusion](../../2-getting-started/1-install-kusion.md). +- Install [kubectl CLI](https://kubernetes.io/docs/tasks/tools/#kubectl) and run a [Kubernetes](https://kubernetes.io/) or [k3s](https://docs.k3s.io/quick-start) or [k3d](https://k3d.io/v5.4.4/#installation) or [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node) cluster. +- Prepare a cloud service account and create a user with at least **VPCFullAccess** and **RDSFullAccess** related permissions to use the Relational Database Service (RDS). This kind of user can be created and managed in the Identity and Access Management (IAM) console of the cloud vendor. +- The environment that executes `kusion` needs to have connectivity to terraform registry to download the terraform providers. + +Additionally, we also need to configure the obtained AccessKey and SecretKey as well as the cloud resource region as environment variables for specific cloud provider: + + + + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +export AWS_REGION=us-east-1 # replace it with your region +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +```mdx-code-block + + +``` + +```bash +export ALICLOUD_ACCESS_KEY="LTAI5txxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="nxuowIxxx" # replace it with your SecretKey +export ALICLOUD_REGION=cn-hangzhou # replace it with your region +``` + +![alicloud iam account](/img/docs/user_docs/getting-started/set-rds-access.png) + +```mdx-code-block + + +``` + +## Init Workspace + +To deploy the WordPress application with cloud rds, we first need to initiate a `Workspace` for the targeted stack (here we are using `dev`). Please copy the following example YAML file to your local `workspace.yaml`. 
+ + + + +`workspace.yaml` +```yaml +# MySQL configurations for AWS RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +`workspace.yaml` +```yaml +# MySQL configurations for Alicloud RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +If you would like to try creating the `Alicloud` RDS instance, you should replace the `[your-subnet-id]` of `modules.kusionstack/mysql@0.1.0.default.subnetID` field with the Alicloud `vSwitchID` to which the database will be provisioned in. After that, you can execute the following command line to initiate the configuration for `dev` workspace. + +```shell +kusion workspace create dev -f workspace.yaml +``` + +Since Kusion by default use the `default` workspace, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you have already created and used the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + +The `workspace.yaml` is a sample configuration file for workspace management, including `MySQL` module configs. Workspace configurations are usually declared by **Platform Engineers** and will take effect through the corresponding stack. + +:::info +More details about the configuration of Workspace can be found in [Concepts of Workspace](../../3-concepts/4-workspace.md). +::: + +## Create Project And Stack + +We can create a new project named `wordpress-rds-cloud` with the `kusion project create` command. + +```shell +# Create a new directory and navigate into it. +mkdir wordpress-rds-cloud && cd wordpress-rds-cloud + +# Create a new project with the name of the current directory. +kusion project create +``` + +After creating the new project, we can create a new stack named `dev` with the `kusion stack create` command. + +```shell +# Create a new stack with the specified name under current project directory. +kusion stack create dev +``` + +The created project and stack structure looks like below: + +```shell +tree +. 
+├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +### Update And Review Configuration Codes + +The configuration codes in the created stack are basically empty, thus we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +```shell +# dev/kcl.mod +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +``` + +```python +# dev/main.k +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +import mysql + +# main.k declares customized configurations for dev stacks. +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + wordpress: c.Container { + image: "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +## Application Delivery + +You can complete the delivery of the WordPress application in the folder of `wordpress-cloud-rds/dev` using the following command line. Kusion will enable the watching of the application resource creation and automatic port-forwarding of the specified port (80) from local to the Kubernetes Service. + +```shell +cd dev && kusion apply --watch +``` + +:::info +During the first apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + + + + +![apply the wordpress application with aws rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![apply the wordpress application with alicloud rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + +``` + +After all the resources reconciled, we can port-forward our local port (e.g. 12345) to the WordPress frontend service port (80) in the cluster: + +```shell +kubectl port-forward -n wordpress-cloud-rds svc/wordpress-cloud-rds-dev-wordpress-private 12345:80 +``` + +![kubectl port-forward for wordpress](/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png) + +## Verify WordPress Application + +Next, we will verify the WordPress site service we just delivered, along with the creation of the RDS instance it depends on. We can start using the WordPress site by accessing the link of local-forwarded port [(http://localhost:12345)](http://localhost:12345) we just configured in the browser. + +![wordpress site page](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +In addition, we can also log in to the cloud service console page to view the RDS instance we just created. 
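+If you prefer the command line, the new instance can also be listed with the cloud vendor's CLI. For example, on AWS (a sketch, assuming the AWS CLI is installed and configured with the same credentials and region as above):
+
+```shell
+aws rds describe-db-instances \
+  --query "DBInstances[].{ID:DBInstanceIdentifier,Status:DBInstanceStatus,Endpoint:Endpoint.Address}" \
+  --output table
+```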
+ + + + +![aws rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png) + +```mdx-code-block + + +``` + +![alicloud rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png) + +```mdx-code-block + + +``` + +## Delete WordPress Application + +You can delete the WordPress application and related RDS resources using the following command line. + +```shell +kusion destroy --yes +``` + + + + +![kusion destroy wordpress with aws rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![kusion destroy wordpress with alicloud rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/2-expose-service.md b/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/2-expose-service.md new file mode 100644 index 00000000..b3e78d73 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/2-expose-service.md @@ -0,0 +1,259 @@ +--- +id: expose-service +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Expose Application Service Deployed on CSP Kubernetes + +Deploying applications on the Kubernetes provided by CSP (Cloud Service Provider) is convenient and reliable, which is adopted by many enterprises. Kusion has a good integration with CSP Kubernetes service. You can deploy your application to the Kubernetes cluster, and expose the service in a quite easy way. + +This tutorial demonstrates how to expose service of the application deployed on CSP Kubernetes. And the responsibilities of platform engineers and application developers are also clearly defined. + +## Prerequisites + +Create a Kubernetes cluster provided by CSP, and complete the corresponding configurations for `KUBECONFIG`. The following CSP Kubernetes services are supported: + +- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks) +- [Alibaba Cloud Container Service for Kubernetes (ACK)](https://www.alibabacloud.com/product/kubernetes) + +## Expose Service Publicly + +If you want the application to be accessed from outside the cluster, you should expose the service publicly. Follow the steps below, you will simply hit the goal. + +### Set up Workspace + +Create the workspace as the target where the application will be deployed to. The workspace is usually set up by **Platform Engineers**, which contains platform-standard and application-agnostic configurations. The workspace configurations are organized through a YAML file. + + + + +```yaml +modules: + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + port: + type: aws +``` + +```mdx-code-block + + +``` + +```yaml +modules: + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + port: + type: alicloud + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +```mdx-code-block + + +``` + +The YAML shown above gives an example of the workspace configuration to expose service on **EKS** and **ACK**. 
The block `port` contains the workspace configuration of Kusion module `network`, which has the following fields: + +- type: the CSP providing Kubernetes service, support `alicloud` and `aws` +- annotations: annotations attached to the service, should be a map +- labels: labels attached to the service, should be a map + +Then, create the workspace with the configuration file. The following command creates a workspace named `dev` with configuration file `workspace.yaml`. + +```bash +kusion workspace create dev -f workspace.yaml +``` + +After that, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you already create and use the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + + +### Init Project + +After creating workspace, you should write application configuration code, which only contains simple and application-centric configurations. This step is usually accomplished by application developers. + +We can start by initializing this tutorial project with `kusion init` cmd: + +```shell +# Create a new directory and navigate into it. +mkdir nginx && cd nginx + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the directory structure can be found in [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md). +::: + +### Update And Review Configuration Codes + +The initiated configuration codes are for the demo quickstart application, we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +`dev/kcl.mod` +```shell +[package] +name = "nginx" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +`dev/main.k` +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: service.Service { + containers: { + nginx: c.Container { + image: "nginx:1.25.2" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: True + } + ] + } + } +} +``` + +The code shown above describes how to expose service publicly. Kusion use schema `Port` to describe the network configuration, the primary fields of Port are as follows: + +- port: port number to expose service +- protocol: protocol to expose service, support `TCP` and `UDP` +- public: whether to public the service + +To public the service, you should set `public` as True. Besides, schema `Service` should be used to describe the workload configuration. + +That's all what an application developer needs to configure! 
Next, preview and apply the configuration, the application will get deployed and exposed publicly. + +:::info +Kusion uses Load Balancer (LB) provided by the CSP to expose service publicly. For more detailed network configuration, please refer to [Application Networking](https://www.kusionstack.io/docs/kusion/configuration-walkthrough/networking) +::: + +:::info +During the first preview and apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + +### Preview and Apply + +Execute `kusion preview` under the stack path, you will get what will be created in the real infrastructure. The picture below gives the preview result of the example. A Namespace, Service and Deployment will be created, which meets the expectation. The service name has a suffix `public`, which shows it can be accessed publicly. + +![preview-public](/img/docs/user_docs/cloud-resources/expose-service/preview-public.png) + +Then, execute `kusion apply --yes` to do the real deploying job. Just a command and a few minutes, you have accomplished deploying application and expose it publicly. + +![apply-public](/img/docs/user_docs/cloud-resources/expose-service/apply-public.png) + +### Verify Accessibility + +In the example above, the kubernetes Namespace whose name is nginx, and a `Service` and `Deployment` under the Namespace should be created. Use `kubectl get` to check, the Service whose type is `LoadBalancer` and Deployment are created indeed. And the Service has `EXTERNAL-IP` 106.5.190.109, which means it can be accessed from outside the cluster. + +![k8s-resource-public](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png) + +Visit the `EXTERNAL-IP` via browser, the correct result is returned, which illustrates the servie getting publicly exposed successfully. + +![result-public](/img/docs/user_docs/cloud-resources/expose-service/result-public.png) + +## Expose Service Inside Cluster + +If you only need the application to be accessed inside the cluster, just configure `Public` as `False` in schema `Port`. There is no need to change the workspace, which means an application developer can easily change a service exposure range, without the involvement of platform engineers. + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: service.Service { + ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: False + } + ] + } + } +} +``` + +Execute `kusion apply --yes`, the generated Service has suffix `private`. + +![apply-private](/img/docs/user_docs/cloud-resources/expose-service/apply-private.png) + +And the Service type is `ClusterIP`, only has `CLUSTER_IP` and no `EXTERNAL_IP`, which means it cannot get accessed from outside the cluster. + +![k8s-resource-private](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png) + +## Summary +This tutorial demonstrates how to expose service of the application deployed on the CSP Kubernetes. By platform engineers' setup of workspace, and application developers' configuration of schema `Port` of Kusion module `network`, Kusion enables you expose service simply and efficiently. 
diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/_category_.json b/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/_category_.json new file mode 100644 index 00000000..f6f2c380 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/1-cloud-resources/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Cloud Resources" +} diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/1-deploy-application.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/1-deploy-application.md new file mode 100644 index 00000000..6b06d4a1 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/1-deploy-application.md @@ -0,0 +1,282 @@ +--- +id: deploy-application +--- + +# Deploy Application + +This guide shows you how to use Kusion CLIs to complete the deployment of an application running in Kubernetes. +We call the abstraction of application operation and maintenance configuration as `AppConfiguration`, and its instance as `Application`. +It is essentially a configuration model that describes an application. The complete definition can be seen [here](../../reference/modules/developer-schemas/app-configuration). + +In production, the application generally includes minimally several k8s resources: + +- Namespace +- Deployment +- Service + +:::tip +This guide requires you to have a basic understanding of Kubernetes. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- [Service](https://kubernetes.io/docs/concepts/services-networking/service/) +::: + +## Prerequisites + +Before we start, we need to complete the following steps: + +1、Install Kusion + +We recommend using HomeBrew(Mac), Scoop(Windows), or an installation shell script to download and install Kusion. +See [Download and Install](../../getting-started/install-kusion) for more details. + +2、Running Kubernetes cluster + +There must be a running and accessible Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. +If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. + +## Initializing + +This guide is to deploy an app using Kusion, relying on the Kusion CLI and an existing Kubernetes cluster. + +### Initializing workspace configuration + +In version 0.10.0, we have introduced the new concept of [workspaces](../../concepts/workspace), which is a logical layer whose configurations represent an opinionated set of defaults, often appointed by the platform team. In most cases workspaces are represented with an "environment" in traditional SDLC terms. These workspaces provide a means to separate the concerns between the **application developers** who wish to focus on business logic, and a group of **platform engineers** who wish to standardize the applications on the platform. + +Driven by the discipline of Platform Engineering, management of the workspaces, including create/updating/deleting workspaces and their configurations should be done by dedicated platform engineers in a large software organizations to facilitate a more mature and scalable collaboration pattern. 
+ +:::tip +More on the collaboration pattern can be found in the [design doc](https://github.com/KusionStack/kusion/blob/main/docs/design/collaboration/collaboration_paradigm.md). +::: + +However, if that does NOT apply to your scenario, e.g. if you work in a smaller org without platform engineers or if you are an individual developer, we wish Kusion can still be a value tool to have when delivering an application. In this guide, we are NOT distinctively highlighting the different roles or what the best practices entails (the design doc above has all that) but rather the steps needed to get Kusion tool to work. + +As of version 0.11.0, workspace configurations in Kusion can not only be managed on the local filesystem in the form of YAML files, but the remotely-managed workspaces have been supported as well. + +To initialize the workspace configuration: + +```bash +~/playground$ touch ~/dev.yaml +~/playground$ kusion workspace create dev -f ~/dev.yaml +create workspace dev successfully +``` + +To verify the workspace has been created properly: + +``` +~/playground$ kusion workspace list +- default +- dev +~/playground$ kusion workspace show dev +{} +``` + +Note that `show` command tells us the workspace configuration is currently empty, which is expected because we created the `dev` workspace with an empty YAML file. An empty workspace configuration will suffice in some cases, where no platform configurations are needed. + +Kusion by default uses the `default` workspace, thus we need to switch to the `dev` workspace we have just created. + +```bash +~/playground$ kusion workspace switch dev +``` + +We will progressively add more workspace configurations throughout this user guide. + +### Initializing application configuration + +Now that workspaces are properly initialized, we can begin by initializing the application configuration: + +```bash +# Create a new directory and navigate into it. +mkdir simple-service && cd simple-service + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The directory structure is as follows: + +```shell +simple-service/ +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +The project directory has the following files that are automatically generated: +- `project.yaml` represents project-level configurations. +- `dev` directory stores the customized stack configuration: + - `dev/main.k` stores configurations in the `dev` stack. + - `dev/stack.yaml` stores stack-level configurations. + - `dev/kcl.mod` stores stack-level dependencies. + +In general, the `.k` files are the KCL source code that represents the application configuration, and the `.yaml` is the static configuration file that describes behavior at the project or stack level. + +:::info +See [Project](../../concepts/project/overview) and [Stack](../../concepts/stack/overview) for more details about Project and Stack. +::: + +The `kusion init` command will create a demo quickstart application, we may update the `dev/kcl.mod` and `dev/main.k` later. + +#### kcl.mod +There should be a `kcl.mod` file generated automatically under the project directory. The `kcl.mod` file describes the dependency for the current project or stack. By default, it should contain a reference to the official [`kam` repository](https://github.com/KusionStack/kam) which holds the Kusion `AppConfiguration` and related workload model definitions that fits best practices. 
You can also create your own models library and reference that. + +You can change the package name in `kcl.mod` to `simple-service`: + +`dev/kcl.mod` +```shell +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +#### main.k +The configuration file `main.k`, usually written by the application developers, declare customized configurations for a specific stack, including an `Application` instance of `AppConfiguration` model. + +You can update the `main.k` as follows: + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +## Previewing + +At this point, the project has been completely initialized. +The configuration is written in KCL, not JSON/YAML which Kubernetes recognizes, so it needs to be built to get the final output. And we can use the `kusion preview` cmd to preview the Kubernetes resources intended to deliver. + +Enter stack dir `simple-service/dev` and preview: + +```bash +cd simple-service/dev && kusion preview +``` + +:::tip +For instructions on the kusion command line tool, execute `kusion -h`, or refer to the tool's online [documentation](../../reference/commands). +::: + +## Applying + +Preview is now completed. We can apply the configuration as the next step. In the output from `kusion preview`, you can see 3 resources: + +- a Namespace named `simple-service` +- a Deployment named `simple-service-dev-helloworld` in the `simple-service` namespace +- a Service named `simple-service-dev-helloworld-private` in the `simple-service` namespace + +Execute command: + +```bash +kusion apply +``` + +The output is similar to: + +``` + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Create +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 3 created, 0 updated, 0 deleted. +``` + +After the configuration applying successfully, you can use the `kubectl` to check the actual status of these resources. + +1、 Check Namespace + +```bash +kubectl get ns +``` + +The output is similar to: + +``` +NAME STATUS AGE +default Active 117d +simple-service Active 38s +kube-system Active 117d +... 
+``` + +2、Check Deployment + +```bash +kubectl get deploy -n simple-service +``` + +The output is similar to: + +``` +NAME READY UP-TO-DATE AVAILABLE AGE +simple-service-dev-helloworld 1/1 1 1 59s +``` + +3、Check Service + +```bash +kubectl get svc -n simple-service +``` + +The output is similar to: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +simple-service-dev-helloworld-private ClusterIP 10.98.89.104 80/TCP 79s +``` + +4、Validate app + +Using the `kubectl` tool, forward native port `30000` to the service port `80`. + +```bash +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:80 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000): + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/2-container.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/2-container.md new file mode 100644 index 00000000..eb51ec5e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/2-container.md @@ -0,0 +1,146 @@ +--- +id: container +--- + +# Configure Containers + +You can manage container-level configurations in the `AppConfiguration` model via the `containers` field (under the `workload` schema). By default, everything defined in the `containers` field will be treated as application containers. Sidecar containers will be supported in a future version of kusion. + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the last guide, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/service). 
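+Before moving on, you can confirm what the `dev` workspace now contains, using the same command introduced in the previous guide:
+
+```bash
+kusion workspace show dev
+```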
+ +## Example + +`simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +## Apply + +Re-run steps in [Applying](deploy-application#applying), new container configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the container (in the deployment template) now has the updated attributes as defined in the container configuration: +``` +$ kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + - name: env1 + value: VALUE + - name: env2 + value: VALUE2 + image: gcr.io/google-samples/gb-frontend:v4 + imagePullPolicy: IfNotPresent + name: helloworld + readinessProbe: + failureThreshold: 3 + httpGet: + host: localhost + path: / + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 500m + memory: 512M +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/3-service.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/3-service.md new file mode 100644 index 00000000..f795430c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/3-service.md @@ -0,0 +1,139 @@ +--- +id: service +--- + +# Expose Service + +You can determine how to expose your service in the `AppConfiguration` model via the `ports` field (under the `network` accessory). The `ports` field defines a list of all the `Port`s you want to expose for the application (and their corresponding listening ports on the container, if they don't match the service ports), so that it can be consumed by other applications. + +Unless explicitly defined, each of the ports exposed is by default exposed privately as a `ClusterIP` type service. You can expose a port publicly by specifying the `public` field in the `Port` schema. At the moment, the implementation for publicly access is done via Load Balancer type service backed by cloud providers. 
Ingress will be supported in a future version of kusion. + +For the `Port` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-port) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the services to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + kusionstack/network@0.1.0: + default: + port: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/networking/network). + +## Example + +`simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + targetPort: 80 + } + ] + } + } +} +``` + +The code above changes the service port to expose from `80` in the last guide to `8080`, but still targeting the container port `80` because that's what the application is listening on. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), new service configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Update +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld UnChanged + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Update v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip +UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip [3/3] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! 
Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the Kubernetes service now has the updated attributes (mapping service port 8080 to container port 80) as defined in the `ports` configuration: + +``` +kubectl get svc -n simple-service -o yaml +... + spec: + ... + ports: + - name: simple-service-dev-helloworld-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 80 +... +``` + +Exposing service port 8080: +``` +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:8080 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000), the application should be up and running: + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/4-image-upgrade.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/4-image-upgrade.md new file mode 100644 index 00000000..ccee54e0 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/4-image-upgrade.md @@ -0,0 +1,78 @@ +--- +id: image-upgrade +--- + +# Upgrade Image + +You can declare the application's container image via `image` field of the `Container` schema. + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the image value in `simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.nginx: { + ... + # before: + # image = "gcr.io/google-samples/gb-frontend:v4" + # after: + image = "gcr.io/google-samples/gb-frontend:v5" + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), update image is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. 
+``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated image (v5) as defined in the container configuration: +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/5-resource-spec.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/5-resource-spec.md new file mode 100644 index 00000000..1e5f208a --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/5-resource-spec.md @@ -0,0 +1,90 @@ +--- +id: resource-spec +--- + +# Configure Resource Specification + +You can manage container-level resource specification in the `AppConfiguration` model via the `resources` field (under the `Container` schema). + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the resources value in `simple-service/dev/main.k`: + +```py +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.helloworld: { + ... + # before: + # resources: { + # "cpu": "500m" + # "memory": "512M" + # } + # after: + resources: { + "cpu": "250m" + "memory": "256Mi" + } + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated resources attributes (cpu:250m, memory:256Mi) as defined in the container configuration: + +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... 
+ spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... + resources: + limits: + cpu: 250m + memory: 256Mi +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md new file mode 100644 index 00000000..915f84c4 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md @@ -0,0 +1,86 @@ +--- +id: set-up-operational-rules +--- + +# Set up Operational Rules + +You can set up operational rules in the `AppConfiguration` model with the `opsrule` accessory and corresponding platform configurations in the workspace directory. The `opsrule` is the collection of operational rule requirements for the application that are used as a preemptive measure to police and stop any unwanted changes. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the opsrule to standardize the behavior of applications, you can do so by updating the `~/dev.yaml`. +Note that the platform engineers should set the default workload to [Kusion Operation CollaSet](https://github.com/KusionStack/operating) and installed the Kusion Operation controllers properly, the `opsrules` module will generate a [PodTransitionRule](https://www.kusionstack.io/docs/operating/manuals/podtransitionrule) instead of updating the `maxUnavailable` value in the deployment: + +```yaml +modules: + service: + default: + type: CollaSet + kusionstack/opsrule@0.1.0: + default: + maxUnavailable: 30% +``` + +Please note that the `maxUnavailable` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +## Example + +Add the `opsrule` module dependency to `kcl.mod`: + +```shell +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +opsrule = { oci = "oci://ghcr.io/kusionstack/opsrule", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +Add the `opsrule` snippet to the `AppConfiguration` in `simple-service/dev/main.k`: + +```py +import kam.v1.app_configuration as ac +import service +import service.container as c +import opsrule + +helloworld: ac.AppConfiguration { + workload: service.Service { + ... 
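+        # The "..." above stands for the containers configuration carried over unchanged
+        # from the previous guides; only the accessories block below is new in this step.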
+ } + # Configure the maxUnavailable rule + accessories: { + "opsrule": opsrule.OpsRule { + "maxUnavailable": 50% + } + } +} +``` + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +## Validation + +We can verify the application deployment strategy now has the updated attributes `maxUnavailable: 50%` in the container configuration. diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/7-job.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/7-job.md new file mode 100644 index 00000000..29a4466d --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/7-job.md @@ -0,0 +1,146 @@ +--- +id: job +--- + +# Schedule a Job + +The guides above provide examples on how to configure workloads of the type `service.Service`, which is typically used for long-running web applications that should **never** go down. Alternatively, you could also schedule another kind of workload profile, namely `wl.Job` which corresponds to a one-off or recurring execution of tasks that run to completion and then stop. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for scheduling a job. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/job). 
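+
+If you want to confirm that the defaults above took effect, you can print the stored workspace configuration back out (assuming the `dev` workspace already exists in your current backend):
+
+```bash
+# Show the current configuration of the dev workspace
+kusion workspace show dev
+```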
+ +## Example + +To schedule a job with cron expression, update `simple-service/dev/kcl.mod` and `simple-service/dev/main.k` to the following: + +`simple-service/dev/kcl.mod`: +```py +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +job = { oci = "oci://ghcr.io/kusionstack/job", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +`simple-service/dev/main.k`: +```py +import kam.v1.app_configuration as ac +import job +import job.container as c + +helloworld: ac.AppConfiguration { + workload: job.Job { + containers: { + "busybox": c.Container { + # The target image + image: "busybox:1.28" + # Run the following command as defined + command: ["/bin/sh", "-c", "echo hello"] + } + } + # Run every minute. + schedule: "* * * * *" + } +} +``` + +The KCL snippet above schedules a job. Alternatively, if you want a one-time job without cron, simply remove the `schedule` from the configuration. + +You can find the full example in here in the [konfig repo](https://github.com/KusionStack/konfig/tree/main/example/simple-job). + +## Applying + +Re-run steps in [Applying](deploy-application#applying) and schedule the job. Your output might look like one of the following: + +If you are starting from scratch, all resources are created on the spot: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* └─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create batch/v1:CronJob:simple-service:helloworld-dev-helloworld success +Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success [2/2] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 2 created, 0 updated, 0 deleted. +``` + +If you are starting from the last guide which configures an `opsrule`, the output looks like the following which destroys the `Deployment` and `Service` and replace it with a `CronJob`: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create +* ├─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Delete +* └─ v1:Service:simple-service:simple-service-dev-helloworld-private Delete + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Delete apps/v1:Deployment:simple-service:simple-service-dev-helloworld success + SUCCESS Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success + SUCCESS Delete v1:Service:simple-service:simple-service-dev-helloworld-private success +Delete v1:Service:simple-service:simple-service-dev-helloworld-private success [4/4] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 1 created, 0 updated, 2 deleted. 
+``` + +## Validation + +We can verify the job has now been scheduled: + +```shell +$ kubectl get cronjob -n simple-service +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +simple-service-dev-helloworld * * * * * False 0 2m18s +``` + +Verify the job has been triggered after the minute mark since we scheduled it to run every minute: +```shell +$ kubectl get job -n simple-service +NAME COMPLETIONS DURATION AGE +simple-service-dev-helloworld-28415748 1/1 5s 11s +``` diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/8-k8s-manifest.md b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/8-k8s-manifest.md new file mode 100644 index 00000000..b706c71a --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/8-k8s-manifest.md @@ -0,0 +1,208 @@ +--- +id: k8s-manifest +--- + +# Apply the Raw K8s Manifest YAML + +The guides above provide examples on how to configure workloads and accessories with KCL, and generate the related Kubernetes resources with Kusion Module generators, which is the usage method we recommend, as it can achieve the separation of concerns between developers and platform engineers, reducing the cognitive burden on developers. + +However, in some specific scenario, users may also have the need to directly use Kusion to apply and manage the raw Kubernetes manifest YAML files, such as taking over some existing resources and deploying CRD (CustomResourceDefinition), or other special resources. + +To help users directly apply raw K8s manifests, the KusionStack community has provided the [k8s_manifest](../../6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md) Kusion Module. + +:::info +The module definition and implementation, as well as the example can be found at [here](https://github.com/KusionStack/catalog/tree/main/modules/k8s_manifest). +::: + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create`, `kusion project create`, `kusion stack create` command, which will create a workspace and project, and also generate a [kcl.mod](deploy-application#kclmod) file under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either. + +However, if you (or the platform team) would like to set some default paths for the raw K8s manifest YAML files to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `dev.yaml` with the following config block: + +```yaml +modules: + k8s_manifest: + path: oci://ghcr.io/kusionstack/k8s_manifest + version: 0.1.0 + configs: + default: + # The default paths to apply for the raw K8s manifest YAML files. + paths: + - /path/to/k8s_manifest.yaml + - /dir/to/k8s_manifest/ +``` + +Please note that the `paths` decalred by the platform engineers in the workspace configs will be merged with the ones declared by the developers in the `AppConfiguration` in `main.k`. 
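+
+As a rough illustration (assuming the merge is a simple union of the two lists), an application that additionally declares `./test.yaml` in its `AppConfiguration` would effectively be applied with all three paths:
+
+```yaml
+# Hypothetical merged result, shown for illustration only
+paths:
+  - /path/to/k8s_manifest.yaml
+  - /dir/to/k8s_manifest/
+  - ./test.yaml
+```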
+ +The workspace configuration needs to be updated with the command: + +```bash +kusion workspace update dev -f dev.yaml +``` + +## Example + +To apply the specified raw K8s manifest YAML files with `k8s_manifest` module, please use the `v0.2.1` version of `kam`, whose `workload` is no longer a required field in the `AppConfiguration` model. An example is shown below: + +`kcl.mod`: +```py +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "v0.2.1" } +k8s_manifest = { oci = "oci://ghcr.io/kusionstack/k8s_manifest", tag = "0.1.0" } +``` + +`stack.yaml`: +```yaml +# Generate a specified namespace +name: dev +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` + +`main.k`: +```py +import kam.v1.app_configuration as ac +import k8s_manifest + +test: ac.AppConfiguration { + accessories: { + "k8s_manifests": k8s_manifest.K8sManifest { + paths: [ + # The `test.yaml` should be placed under the stack directory, + # as it is declared using a relative path. + "./test.yaml" + ] + } + } +} +``` + +`test.yaml`: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: test + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 +``` + +## Generate and Applying + +Execute the `kusion generate` command, the `Deployment` in the `test.yaml` will be generated into a Kusion `Resource` with a Kusion ID in the `Spec`. + +``` +➜ dev git:(main) ✗ kusion generate + ✔︎ Generating Spec in the Stack dev... +resources: + - id: v1:Namespace:test + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: test + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:test:nginx-deployment + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + app: nginx + name: nginx-deployment + namespace: test + spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.14.2 + name: nginx + ports: + - containerPort: 80 + dependsOn: + - v1:Namespace:test +secretStore: null +context: {} +``` + +Execute the `kusion apply` command, you may get the output like the following: + +``` +➜ dev git:(main) ✗ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev +ID Action +v1:Namespace:test Create +apps/v1:Deployment:test:nginx-deployment Create + + +Do you want to apply these diffs?: + > yes + +Start applying diffs ... + ✔︎ Succeeded v1:Namespace:test + ✔︎ Succeeded apps/v1:Deployment:test:nginx-deployment +Apply complete! Resources: 2 created, 0 updated, 0 deleted. 
+ +[v1:Namespace:test] +Type Kind Name Detail +READY Namespace test Phase: Active +[apps/v1:Deployment:test:nginx-deployment] +Type Kind Name Detail +READY Deployment nginx-deployment Ready: 3/3, Up-to-date: 3, Available: 3 +READY ReplicaSet nginx-deployment-7fb96c846b Desired: 3, Current: 3, Ready: 3 +READY Pod nginx-deployment-7fb96c846b-d9pp4 Ready: 1/1, Status: Running, Restart: 0, Age: 2s +``` + +## Validation + +We can verify the `Deployment` and `Pod` we have just applied: + +```shell +➜ dev git:(main) ✗ kubectl get deployment -n test +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 70s +➜ dev git:(main) ✗ kubectl get pod -n test +NAME READY STATUS RESTARTS AGE +nginx-deployment-7fb96c846b-d9pp4 1/1 Running 0 87s +nginx-deployment-7fb96c846b-j45nt 1/1 Running 0 87s +nginx-deployment-7fb96c846b-tnz5f 1/1 Running 0 87s +``` diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/_category_.json b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/_category_.json new file mode 100644 index 00000000..79d3c6c5 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/2-working-with-k8s/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kubernetes" +} diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/3-observability/1-prometheus.md b/docs_versioned_docs/version-v0.12/5-user-guides/3-observability/1-prometheus.md new file mode 100644 index 00000000..d67141de --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/3-observability/1-prometheus.md @@ -0,0 +1,327 @@ +--- +id: prometheus +--- + +# Configure Monitoring Behavior With Prometheus + +This document provides the step-by-step instruction to set up monitoring for your application. + +As of today, Kusion supports the configuration of Prometheus scraping behaviors for the target application. In the future, we will add more cloud-provider-native solutions, such as AWS CloudWatch, Azure Monitor, etc. + +The user guide below is composed of the following components: + +- Namespace +- Deployment +- Service +- ServiceMonitor + +:::tip + +This guide requires you to have a basic understanding of Kubernetes and Prometheus. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/) +::: + +## Pre-requisite +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +## Setting up your own Prometheus + +There a quite a few ways to set up Prometheus in your cluster: +1. Installing a Prometheus operator +2. Installing a standalone Prometheus server +3. Installing a Prometheus agent and connect to a remote Prometheus server + +[The advice from the Prometheus team](https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-401092041) is to use the `ServiceMonitor` or `PodMonitor` CRs via the Prometheus operator to manage scrape configs going forward[2]. + +In either case, you only have to do this setup once per cluster. This doc will use a minikube cluster and Prometheus operator as an example. 
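+
+If you don't have a local cluster running yet, a minimal way to get one (assuming minikube is already installed) is:
+
+```
+minikube start
+```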
+ +### Installing Prometheus operator[3]. +To get the example in this user guide working, all you need is a running Prometheus operator. You can have that installed by running: +``` +LATEST=$(curl -s https://api.github.com/repos/prometheus-operator/prometheus-operator/releases/latest | jq -cr .tag_name) +curl -sL https://github.com/prometheus-operator/prometheus-operator/releases/download/${LATEST}/bundle.yaml | kubectl create -f - +``` + +This will install all the necessary CRDs and the Prometheus operator itself in the default namespace. Wait a few minutes, you can confirm the operator is up by running: +``` +kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator -n default +``` + +### Make sure RBAC is properly set up +If you have RBAC enabled on the cluster, the following must be created for Prometheus to work properly: +``` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: default +``` + +### Configure Prometheus instance via the operator +Once all of the above is set up, you can then configure the Prometheus instance via the operator: +``` +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: prometheus +spec: + serviceAccountName: prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + resources: + requests: + memory: 400Mi +``` +This Prometheus instance above will be cluster-wide, picking up ALL the service monitors and pod monitors across ALL the namespaces. +You can adjust the requests and limits accordingly if you have a larger cluster. + +### Exposing the Prometheus portal (optional) +Once you have the managed Prometheus instance created via the Prometheus CR above, you should be able to see a service created called `prometheus-operated`: + +![prometheus-operated](/img/docs/user_docs/guides/prometheus/prometheus-operated.png) + +If you are also running on minikube, you can expose it onto your localhost via kubectl: +``` +kubectl port-forward svc/prometheus-operated 9099:9090 +``` + +You should then be able to see the Prometheus portal via `localhost:9099` in your browser: + +![prometheus-portal](/img/docs/user_docs/guides/prometheus/prometheus-portal.png) + +If you are running a non-local cluster, you can try to expose it via another way, through an ingress controller for example. + +## Setting up workspace configs + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. 
+ +In the case of setting up Prometheus, there are a few things to set up on the workspace level: + +### Operator mode + +The `operatorMode` flag indicates to Kusion whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages. + +To see more about different ways to run Prometheus in the Kubernetes cluster, please refer to the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md#prometheus-installation). + +Most cloud vendors provide an out-of-the-box monitoring solutions for workloads running in a managed-Kubernetes cluster (EKS, AKS, etc), such as AWS CloudWatch, Azure Monitor, etc. These solutions mostly involve installing an agent (CloudWatch Agent, OMS Agent, etc) in the cluster and collecting the metrics to a centralized monitoring server. In those cases, you don't need to set `operatorMode` to `True`. It only needs to be set to `True` when you have an installation of the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) running inside the Kubernetes cluster. + +:::info + +For differences between [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) and the [community kube-prometheus-stack helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), the details are documented [here](https://github.com/prometheus-operator/prometheus-operator#prometheus-operator-vs-kube-prometheus-vs-community-helm-chart). +::: + +### Monitor types + +The `monitorType` flag indicates the kind of monitor Kusion will create. It only applies when `operatorMode` is set to `True`. As of version 0.10.0, Kusion provides options to scrape metrics from either the application pods or its corresponding Kubernetes services. This determines the different kinds of resources Kusion manages when Prometheus runs as an operator in the target cluster. + +A sample `workspace.yaml` with Prometheus settings: +```yaml +modules: + ... + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Service + scheme: http + interval: 30s + timeout: 15s +... +``` + +To instruct Prometheus to scrape from pod targets instead: +```yaml +modules: + ... + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s +... +``` + +If the `operatorMode` is omitted from the `workspace.yaml`, Kusion defaults `operatorMode` to false. + +### Overriding with projectSelector + +Workspace configurations contain a set of default setting group for all projects in the workspace, with means to override them by Projects using a `projectSelector` keyword. + +Projects with the name matching those in projectSelector will use the values defined in that override group instead of the default. If a key is not present in the override group, the default value will be used. + +Take a look at the sample `workspace.yaml`: +```yaml +modules: + ... + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + projectSelector: + - foobar + high_frequency: + monitorType: Service + projectSelector: + - helloworld +... 
+``` + +In the example above, a project with the name `helloworld` will have the monitoring settings where `operatorMode` is set to `False`, a 2 minute scraping interval, 15 seconds timeout (coming from default) and http scheme (coming from default). + +You cannot have the same project appear in two projectSelectors. + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/monitoring/prometheus). + +## Updating the workspace config + +Assuming you now have a `workspace.yaml` that looks like the following: +```yaml +modules: + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Service + scheme: http + interval: 30s + timeout: 15s +... +``` + +Update the workspace configuration by running the following command: +``` +kusion workspace update dev -f workspace.yaml +``` +Verify the workspace config is properly updated by running the command: +``` +kusion workspace show dev +``` + +## Using kusion to deploy your application with monitoring requirements + +At this point we are set up for good! Any new applications you deploy via kusion will now automatically have the monitoring-related resources created, should you declare you want it via the `monitoring` field in the `AppConfiguration` model. + +The monitoring in an AppConfiguration is declared in the `monitoring` field. See the example below for a full, deployable AppConfiguration. + +Please note we are using a new image `quay.io/brancz/prometheus-example-app` since the app itself need to expose metrics for Prometheus to scrape: + +`helloworld/dev/kcl.mod`: +``` +[package] +name = "helloworld" + +[dependencies] +monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.2.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +`helloworld/dev/main.k`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import monitoring as m +import network.network as n + +helloworld: ac.AppConfiguration { + workload: service.Service { + containers: { + "monitoring-sample-app": c.Container { + image: "quay.io/brancz/prometheus-example-app:v0.3.0" + } + } + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/metrics" + } + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + } +} +``` + +The KCL file above represents an application with a service type workload, exposing the port 8080, and would like Prometheus to scrape the `/metrics` endpoint every 2 minutes. 
+ +Running `kusion apply` would show that kusion will create a `Namespace`, a `Deployment`, a `Service` and a `ServiceMonitor`: +![kusion-apply-with-monitor](/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png) + +Continue applying all resources: +![kusion-apply-success](/img/docs/user_docs/guides/prometheus/kusion-apply-success.png) + +If we want to, we can verify the service monitor has been created successfully: +![service-monitor](/img/docs/user_docs/guides/prometheus/service-monitor.png) + +In a few seconds, you should be able to see in the Prometheus portal that the service we just deployed has now been discovered and monitored by Prometheus: +![prometheus-targets](/img/docs/user_docs/guides/prometheus/prometheus-targets.png) + +You can run a few simply queries for the data that Prometheus scraped from your application: +![prometheus-simple-query](/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png) + +For more info about PromQL, you can find them [here](https://prometheus.io/docs/prometheus/latest/querying/basics/)[4]. + +## References +1. Prometheus: https://prometheus.io/docs/introduction/overview/ +2. Prometheus team advise: https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-446691500 +3. Prometheus operator getting started doc: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md +4. PromQL basics: https://prometheus.io/docs/prometheus/latest/querying/basics/ \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/3-observability/_category_.json b/docs_versioned_docs/version-v0.12/5-user-guides/3-observability/_category_.json new file mode 100644 index 00000000..b061ae3e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/3-observability/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Automated Observability" +} diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/4-secrets-management/1-using-cloud-secrets.md b/docs_versioned_docs/version-v0.12/5-user-guides/4-secrets-management/1-using-cloud-secrets.md new file mode 100644 index 00000000..56a8ccb8 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/4-secrets-management/1-using-cloud-secrets.md @@ -0,0 +1,97 @@ +--- +id: using-cloud-secrets +--- + +# Using Cloud Secrets Manager + +Applications usually store sensitive data in secrets by using centralized secrets management solutions. For example, you authenticate databases, services, and external systems with passwords, API keys, tokens, and other credentials stored in a secret store, e.g. Hashicorp Vault, AWS Secrets Manager, Azure Key Vault, etc + +Kusion provides out-of-the-box support to reference existing external secrets management solution, this tutorial introduces that how to pull the secret from AWS Secrets Manager to make it available to applications. + +## Prerequisites + +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. 
+ +Additionally, you also need to configure the obtained AccessKey and SecretKey as environment variables: + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +## Setting up workspace + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. + +In the case of setting up cloud secrets manager, platform teams need to specify which secrets management solution to use and necessary information to access on the workspace level. + +A sample `workspace.yaml` with AWS Secrets Manager settings: + +``` +modules: + ... +secretStore: + provider: + aws: + region: us-east-1 + profile: The optional profile to be used to interact with AWS Secrets Manager. +... +``` + +## Update AppConfiguration + +At this point we are set up for good! Now you can declare external type of secrets via the `secrets` field in the `AppConfiguration` model to consume sensitive data stored in AWS Secrets Manager. + +See the example below for a full, deployable AppConfiguration. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +gitsync: ac.AppConfiguration { + workload: service.Service { + containers: { + "syncer": c.Container { + image: "dyrnq/git-sync" + # Run the following command as defined + command: [ + "--repo=https://github.com/KusionStack/kusion" + "--ref=HEAD" + "--root=/mnt/git" + ] + # Consume secrets in environment variables + env: { + "GIT_SYNC_USERNAME": "secret://git-auth/username" + "GIT_SYNC_PASSWORD": "secret://git-auth/password" + } + } + } + # Secrets used to retrieve secret data from AWS Secrets Manager + secrets: { + "git-auth": sec.Secret { + type: "external" + data: { + "username": "ref://git-auth-info/username" + "password": "ref://git-auth-info/password" + } + } + } + } +} +``` + +## Apply and Verify + +Run `kusion apply` command to deploy above application, then use the below command to verify if the secret got deployed: + +``` +kubectl get secret -n secretdemo +``` + +You will find `git-auth` of type Opaque automatically created and contains sensitive information pulled from AWS Secrets Manager. 
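+
+As a minimal follow-up check (assuming the application was deployed into the `secretdemo` namespace as above), you can inspect the generated secret directly:
+
+```
+# Confirm the secret exists and is of type Opaque
+kubectl get secret git-auth -n secretdemo
+
+# List the keys stored in the secret (values are shown as byte counts, not plaintext)
+kubectl describe secret git-auth -n secretdemo
+```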
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/4-secrets-management/_category_.json b/docs_versioned_docs/version-v0.12/5-user-guides/4-secrets-management/_category_.json new file mode 100644 index 00000000..8990c11b --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/4-secrets-management/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Secrets Management" +} diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/5-production-practice-case/1-production-practice-case.md b/docs_versioned_docs/version-v0.12/5-user-guides/5-production-practice-case/1-production-practice-case.md new file mode 100644 index 00000000..52d5f07e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/5-production-practice-case/1-production-practice-case.md @@ -0,0 +1,190 @@ +--- +id: collaborate-with-github-actions +--- + +# Achieving Team Collaboration in Production Practice with GitHub Actions + +In this article, we will introduce how to use Kusion CLI in combination with GitHub Actions to achieve team collaboration in production practice. + +Adopting the concept of separation of concerns, we divide the staff involved in application delivery and operation into two groups: **Platform Engineers (PEs)** and **Developers (Devs)**. As the builders of the Internal Developer Platform (IDP), platform engineers are primarily responsible for creating the [storage backend](../../3-concepts/7-backend.md) for the Kusion CLI in team collaborative scenarios (e.g. AWS S3 or Alicloud OSS), developing custom reusable [Kusion modules](../../3-concepts/3-module/1-overview.md), and creating and maintaining standardized platform configurations in [workspace](../../3-concepts/4-workspace.md). While application developers can focus on writing the application business logic and the configuration codes, self-serving the application delivery and operation by triggering the automated CI/CD pipelines. [GitHub Actions](https://github.com/features/actions) is such a CI/CD platform, and by customizing [GitHub Actions workflow](https://docs.github.com/en/actions/using-workflows), the pipeline such as building, testing, and deploying will be executed automatically. + +In the following sections, we will demonstrate the specific workflow from the perspectives of both PEs and Devs with the sample workflows from our [konfg](https://github.com/KusionStack/konfig) and [catalog](https://github.com/KusionStack/catalog) repository. + +## Perspective of PE + +### Setup Kusion Storage Backend + +In order to enable multiple people to collaboratively edit and modify application configuration code within a team, PEs need to create a centralized remote storage backend for Kusion CLI, such as [AWS S3](https://aws.amazon.com/pm/serv-s3/) or [Alicloud OSS](https://www.alibabacloud.com/en/product/object-storage-service). Below is an example OSS bucket, we can see that it is mainly used to store application **releases** and **workspace** configurations. + +![alicloud oss bucket for storage backend](/img/docs/user_docs/guides/github-actions/alicloud_oss_storage_backend.png) + +Suppose PEs have set up the Alicloud OSS storage backend and get the AK/SK with the permission to read and write the bucket, they can use the following commands to set up the remote storage backend. 
+ +```shell +# please replace the env with actual AK/SK +export OSS_ACCESS_KEY_ID=LTAxxxxxxxxxxxxxx +export OSS_ACCESS_KEY_SECRET=uUPxxxxxxxxxx + +# set up backend +kusion config set backends.oss_test '{"type":"oss","configs":{"bucket":"kusion-test","endpoint":"oss-cn-shanghai.aliyuncs.com"}}' +kusion config set backends.current oss_test +``` + +### Develop Customized Kusion Modules + +In the production practice of an enterprise, a common scenario is that PEs need to abstract and encapsulate the on-premises infrastructural computing, storage and networking resources to reduce the cognitive burden of the developers. And they can develop customized Kusion modules, a kind of reusable building blocks to achieve this goal. Below shows an example [GitHub Actions workflow](https://github.com/KusionStack/catalog/actions/runs/9398478367/job/25883893076) for pushing the module artifacts provided by KusionStack Official with multiple os/arch to [GitHub Packages](https://github.com/features/packages). + +![upload kusion modules through github actions](/img/docs/user_docs/guides/github-actions/upload_modules.png) + +### Create and Update Workspace + +Moreover, PEs also need to create and update the workspace configurations, where they can declare the Kusion modules available in the workspace, and add some standardized default or application-specific configurations across the entire scope of the workspace. + +Suppose PEs have set up the remote storage backend, they can use the following commands to create and update workspace. + +```shell +# create workspace with the name of 'dev' +kusion workspace create dev + +# update workspace with 'dev.yaml' +kusion workspace update dev -f dev.yaml + +# switch to the 'dev' workspace +kusion workspace switch dev +``` + +```yaml +# dev.yaml declares 'mysql' and 'network' modules in the workspace +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 +``` + +So far, PE has almost completed the fundamental work for setting up the IDP. + +## Perspective of Dev + +### Setup Kusion Storage Backend + +In order to get the available modules of the workspace and validate the generated [spec](../../3-concepts/6-spec.md), developers need to communicate with PEs to obtain the AK/SK (usually with **Read-Only** permission), bucket name, and the endpoint to access the remote storage backend. And similar to the PEs, developers can set up the backend configs with the following commands. + +```shell +# please replace the env with actual AK/SK +export OSS_ACCESS_KEY_ID=LTAxxxxxxxxxxxxxx +export OSS_ACCESS_KEY_SECRET=uUPxxxxxxxxxx + +# set up backend +kusion config set backends.oss_test '{"type":"oss","configs":{"bucket":"kusion-test","endpoint":"oss-cn-shanghai.aliyuncs.com"}}' +kusion config set backends.current oss_test +``` + +### Create and Update Project and Stack + +Next, developers can create and update the [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md) configurations with `kusion project` and `kusion stack` command. + +```shell +# create a new project named quickstart +mkdir quickstart && cd quickstart +kusion project create + +# create a stack named dev +kusion stack create dev +``` + +Below shows the initiated project and stack contents. + +```yaml +# quickstart/project.yaml +name: quickstart +``` + +```yaml +# quickstart/dev/stack.yaml +# The metadata information of the stack. 
+name: dev +``` + +```python +# kcl.mod +# Please add the modules you need in 'dependencies'. +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +``` + +```python +# main.k +# The configuration codes in perspective of developers. +import kam.v1.app_configuration as ac +import service +import service.container as c + +# Please replace the ${APPLICATION_NAME} with the name of your application, and complete the +# 'AppConfiguration' instance with your own workload and accessories. +${APPLICATION_NAME}: ac.AppConfiguration { + workload: service.Service { + containers: { + + } + } + accessories: { + + } +} +``` + +Developers can use `kusion mod list` to get the available modules in current workspace and use `kusion mod add` to add a specified module to current stack. + +```shell +# list the available modules in the current workspace +➜ kusion mod list +Name Version URL +mysql 0.2.0 oci://ghcr.io/kusionstack/mysql +network 0.2.0 oci://ghcr.io/kusionstack/network +``` + +```shell +# add the specified modules to the current stack +kusion mod add mysql && kusion mod add network +``` + +The corresponding module artifacts will be downloaded and the declaration of the modules will be added to `kcl.mod`, which can be compared to `go mod tidy` and `go.mod`. + +```python +# kcl.mod after executing 'kusion mod add' +[package] + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +``` + +After this, developers can edit the application configuration codes according to the actual needs. + +### Trigger Preview and Apply Pipeline + +[KusionStack/konfig](https://github.com/KusionStack/konfig) is the official example repository, and provides a set of GitHub Actions workflows [preview.yml](https://github.com/KusionStack/konfig/blob/main/.github/workflows/preview.yml) and [apply.yml](https://github.com/KusionStack/konfig/blob/main/.github/workflows/apply.yml). The `preview.yml` is triggered by a pull request to the main branch, while `apply.yml` is triggered by a push to the main branch. + +![preview workflow](/img/docs/user_docs/guides/github-actions/github_actions_preview.png) + +![apply workflow](/img/docs/user_docs/guides/github-actions/github_actions_apply.png) + +The previewing workflow will first get the changed projects and stacks. + +![get changed projects and stacks](/img/docs/user_docs/guides/github-actions/github_actions_get_changed_projects_stacks.png) + +Then the previewing workflow will execute the `kusion preview` command to all of the changed stacks, and open an issue for manual approval to merge the changes after the approvers check the preview result artifact. + +![preview workflow details](/img/docs/user_docs/guides/github-actions/github_actions_preview_details.png) + +![mannual approval](/img/docs/user_docs/guides/github-actions/github_actions_mannual_approval.png) + +Once the code review is completed and the pull request is merged into the main branch, it will trigger the apply workflow, which will deploy the changes to the affected Projects and Stacks, and upload the respective results to the GitHub Actions Artifacts. 
+ +![apply workflow details](/img/docs/user_docs/guides/github-actions/github_actions_apply_details.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/5-production-practice-case/_category_.json b/docs_versioned_docs/version-v0.12/5-user-guides/5-production-practice-case/_category_.json new file mode 100644 index 00000000..2b76a644 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/5-production-practice-case/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Production Practice Case" +} diff --git a/docs_versioned_docs/version-v0.12/5-user-guides/_category_.json b/docs_versioned_docs/version-v0.12/5-user-guides/_category_.json new file mode 100644 index 00000000..abf4c874 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/5-user-guides/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guides" +} diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/_category_.json b/docs_versioned_docs/version-v0.12/6-reference/1-commands/_category_.json new file mode 100644 index 00000000..d783ca2e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Commands" +} diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/index.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/index.md new file mode 100644 index 00000000..55ecc938 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/index.md @@ -0,0 +1,38 @@ +# Kusion Commands + +Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +### Synopsis + +As a Platform Orchestrator, Kusion delivers user intentions to Kubernetes, Clouds and On-Premise resources. Also enables asynchronous cooperation between the development and the platform team and drives the separation of concerns. + +``` +kusion [flags] +``` + +### Options + +``` + -h, --help help for kusion + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion apply](kusion-apply.md) - Apply the operational intent of various resources to multiple runtimes +* [kusion config](kusion-config.md) - Interact with the Kusion config +* [kusion destroy](kusion-destroy.md) - Destroy resources within the stack. 
+* [kusion generate](kusion-generate.md) - Generate and print the resulting Spec resources of target Stack +* [kusion init](kusion-init.md) - Initialize the scaffolding for a demo project +* [kusion mod](kusion-mod.md) - Manage Kusion modules +* [kusion options](kusion-options.md) - Print the list of flags inherited by all commands +* [kusion preview](kusion-preview.md) - Preview a series of resource changes within the stack +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory +* [kusion version](kusion-version.md) - Print the Kusion version information for the current context +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-apply.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-apply.md new file mode 100644 index 00000000..57442b29 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-apply.md @@ -0,0 +1,71 @@ +# kusion apply + +Apply the operational intent of various resources to multiple runtimes + +### Synopsis + +Apply a series of resource changes within the stack. + + Create, update or delete resources according to the operational intent within a stack. By default, Kusion will generate an execution preview and prompt for your approval before performing any actions. You can review the preview details and make a decision to proceed with the actions or abort them. + +``` +kusion apply [flags] +``` + +### Examples + +``` + # Apply with specified work directory + kusion apply -w /path/to/workdir + + # Apply with specified arguments + kusion apply -D name=test -D age=18 + + # Skip interactive approval of preview details before applying + kusion apply --yes + + # Apply without output style and color + kusion apply --no-style=true + + # Apply without watching the resource changes and waiting for reconciliation + kusion apply --watch=false + + # Apply with the specified timeout duration for kusion apply command, measured in second(s) + kusion apply --timeout=120 + + # Apply with localhost port forwarding + kusion apply --port-forward=8080 +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details with interactive options (default true) + --dry-run Preview the execution effect (always successful) without actually applying the changes + -h, --help help for apply + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string Specify the output format + --port-forward int Forward the specified port from local to service + --timeout int The timeout duration for kusion apply command, measured in second(s) + --watch After creating/updating/deleting the requested object, watch for changes (default true) + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. 
+ -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-get.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-get.md new file mode 100644 index 00000000..5116ceda --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-get.md @@ -0,0 +1,37 @@ +# kusion config get + +Get a config item + +### Synopsis + +This command gets the value of a specified kusion config item, where the config item must be registered. + +``` +kusion config get +``` + +### Examples + +``` + # Get a config item + kusion config get backends.current +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-list.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-list.md new file mode 100644 index 00000000..39e1ad34 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-list.md @@ -0,0 +1,37 @@ +# kusion config list + +List all config items + +### Synopsis + +This command lists all the kusion config items and their values. + +``` +kusion config list +``` + +### Examples + +``` + # List config items + kusion config list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-set.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-set.md new file mode 100644 index 00000000..886a170c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-set.md @@ -0,0 +1,40 @@ +# kusion config set + +Set a config item + +### Synopsis + +This command sets the value of a specified kusion config item, where the config item must be registered, and the value must be in valid type. 
+ +``` +kusion config set +``` + +### Examples + +``` + # Set a config item with string type value + kusion config set backends.current s3-pre + + # Set a config item with struct or map type value + kusion config set backends.s3-pre.configs '{"bucket":"kusion"}' +``` + +### Options + +``` + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-unset.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-unset.md new file mode 100644 index 00000000..c39f90b9 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config-unset.md @@ -0,0 +1,37 @@ +# kusion config unset + +Unset a config item + +### Synopsis + +This command unsets a specified kusion config item, where the config item must be registered. + +``` +kusion config unset +``` + +### Examples + +``` + # Unset a config item + kusion config unset backends.s3-pre.configs.bucket +``` + +### Options + +``` + -h, --help help for unset +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config.md new file mode 100644 index 00000000..70670944 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-config.md @@ -0,0 +1,34 @@ +# kusion config + +Interact with the Kusion config + +### Synopsis + +Config contains the operation of Kusion configurations. + +``` +kusion config [flags] +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion config get](kusion-config-get.md) - Get a config item +* [kusion config list](kusion-config-list.md) - List all config items +* [kusion config set](kusion-config-set.md) - Set a config item +* [kusion config unset](kusion-config-unset.md) - Unset a config item + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-destroy.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-destroy.md new file mode 100644 index 00000000..87a3d2e6 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-destroy.md @@ -0,0 +1,45 @@ +# kusion destroy + +Destroy resources within the stack. + +### Synopsis + +Destroy resources within the stack. 
+ + Please note that the destroy command does NOT perform resource version checks. Therefore, if someone submits an update to a resource at the same time you execute a destroy command, their update will be lost along with the rest of the resource. + +``` +kusion destroy [flags] +``` + +### Examples + +``` + # Delete resources of current stack + kusion destroy +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details after previewing it + -h, --help help for destroy + --no-style no-style sets to RawOutput mode and disables all of styling + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-generate.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-generate.md new file mode 100644 index 00000000..04c8f122 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-generate.md @@ -0,0 +1,51 @@ +# kusion generate + +Generate and print the resulting Spec resources of target Stack + +### Synopsis + +This command generates Spec resources with given values, then write the resulting Spec resources to specific output file or stdout. + + The nearest parent folder containing a stack.yaml file is loaded from the project in the current directory. + +``` +kusion generate [flags] +``` + +### Examples + +``` + # Generate and write Spec resources to specific output file + kusion generate -o /tmp/spec.yaml + + # Generate spec with custom workspace + kusion generate -o /tmp/spec.yaml --workspace dev + + # Generate spec with specified arguments + kusion generate -D name=test -D age=18 +``` + +### Options + +``` + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for generate + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string File to write generated Spec resources to + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-init.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-init.md new file mode 100644 index 00000000..2fa87051 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-init.md @@ -0,0 +1,44 @@ +# kusion init + +Initialize the scaffolding for a demo project + +### Synopsis + +This command initializes the scaffolding for a demo project with the name of the current directory to help users quickly get started. + + Note that target directory needs to be an empty directory. + +``` +kusion init [flags] +``` + +### Examples + +``` + # Initialize a demo project with the name of the current directory + mkdir quickstart && cd quickstart + kusion init + + # Initialize the demo project in a different target directory + kusion init --target projects/my-demo-project +``` + +### Options + +``` + -h, --help help for init + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-add.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-add.md new file mode 100644 index 00000000..65d76d73 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-add.md @@ -0,0 +1,39 @@ +## kusion mod add + +Add a module from a workspace + +``` +kusion mod add MODULE_NAME [--workspace WORKSPACE] [flags] +``` + +### Examples + +``` + # Add a kusion module to the kcl.mod from the current workspace to use it in AppConfiguration + kusion mod add my-module + + # Add a module to the kcl.mod from a specified workspace to use it in AppConfiguration + kusion mod add my-module --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for add + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-init.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-init.md new file mode 100644 index 00000000..6abca2bb --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-init.md @@ -0,0 +1,40 @@ +# kusion mod init + +Create a kusion module along with common files and directories in the current directory + +``` +kusion mod init MODULE_NAME PATH [flags] +``` + +### Examples + +``` + # Create a kusion module template in the current directory + kusion mod init my-module + + # Init a kusion module at the specified Path + kusion mod init my-module ./modules + + # Init a module from a remote git template repository + kusion mod init my-module --template https://github.com// +``` + +### Options + +``` + -h, --help help for init + --template string Initialize with specified template +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-list.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-list.md new file mode 100644 index 00000000..7803b990 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-list.md @@ -0,0 +1,39 @@ +## kusion mod list + +List kusion modules in a workspace + +``` +kusion mod list [--workspace WORKSPACE] [flags] +``` + +### Examples + +``` + # List kusion modules in the current workspace + kusion mod list + + # List modules in a specified workspace + kusion mod list --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for list + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-push.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-push.md new file mode 100644 index 00000000..0649dc36 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod-push.md @@ -0,0 +1,62 @@ +# kusion mod push + +Push a module to OCI registry + +### Synopsis + +The push command packages the module as an OCI artifact and pushes it to the OCI registry using the version as the image tag. 
+ +``` +kusion mod push MODULE_PATH OCI_REPOSITORY_URL [--creds CREDENTIALS] +``` + +### Examples + +``` + # Push a module of current OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --creds + + # Push a module of specific OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --os-arch=darwin/arm64 --creds + + # Push a module to an OCI Registry using credentials in : format. + kusion mod push /path/to/my-module oci://ghcr.io/org --creds : + + # Push a release candidate without marking it as the latest stable + kusion mod push /path/to/my-module oci://ghcr.io/org --latest=false + + # Push a module with custom OCI annotations + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --annotation='org.opencontainers.image.documentation=https://app.org/docs' + + # Push and sign a module with Cosign (the cosign binary must be present in PATH) + export COSIGN_PASSWORD=password + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --sign=cosign --cosign-key=/path/to/cosign.key +``` + +### Options + +``` + -a, --annotations strings Set custom OCI annotations in '=' format. + --cosign-key string The Cosign private key for signing the module. + --creds string The credentials token for the OCI registry in or : format. + -h, --help help for push + --insecure-registry If true, allows connecting to an OCI registry without TLS or with self-signed certificates. + --latest Tags the current version as the latest stable module version. (default true) + --os-arch string The os arch of the module e.g. 'darwin/arm64', 'linux/amd64'. + --sign string Signs the module with the specified provider. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod.md new file mode 100644 index 00000000..d623a334 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-mod.md @@ -0,0 +1,37 @@ +# kusion mod + +Manage Kusion modules + +### Synopsis + +Commands for managing Kusion modules. + + These commands help you manage the lifecycle of Kusion modules. + +``` +kusion mod +``` + +### Options + +``` + -h, --help help for mod +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion mod add](kusion-mod-add.md) - Add a module from a workspace +* [kusion mod init](kusion-mod-init.md) - Create a kusion module along with common files and directories in the current directory +* [kusion mod list](kusion-mod-list.md) - List kusion modules in a workspace +* [kusion mod push](kusion-mod-push.md) - Push a module to OCI registry + + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-options.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-options.md new file mode 100644 index 00000000..56b47123 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-options.md @@ -0,0 +1,37 @@ +# kusion options + +Print the list of flags inherited by all commands + +### Synopsis + +Print the list of flags inherited by all commands + +``` +kusion options [flags] +``` + +### Examples + +``` + # Print flags inherited by all commands + kusion options +``` + +### Options + +``` + -h, --help help for options +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-preview.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-preview.md new file mode 100644 index 00000000..43d24428 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-preview.md @@ -0,0 +1,60 @@ +# kusion preview + +Preview a series of resource changes within the stack + +### Synopsis + +Preview a series of resource changes within the stack. + + Create, update or delete resources according to the intent described in the stack. By default, Kusion will generate an execution preview and present it for your approval before taking any action. + +``` +kusion preview [flags] +``` + +### Examples + +``` + # Preview with specified work directory + kusion preview -w /path/to/workdir + + # Preview with specified arguments + kusion preview -D name=test -D age=18 + + # Preview with ignored fields + kusion preview --ignore-fields="metadata.generation,metadata.managedFields" + + # Preview with json format result + kusion preview -o json + + # Preview without output style and color + kusion preview --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details with interactive options (default true) + -h, --help help for preview + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string Specify the output format + -w, --workdir string The work directory to run Kusion CLI. 
+ --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-project-create.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-project-create.md new file mode 100644 index 00000000..5df9006c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-project-create.md @@ -0,0 +1,44 @@ +# kusion project create + +Create a new project + +### Synopsis + +This command creates a new project.yaml file under the target directory which by default is the current working directory. + + Note that the target directory needs to be an empty directory. + +``` +kusion project create +``` + +### Examples + +``` + # Create a new project with the name of the current working directory + mkdir my-project && cd my-project + kusion project create + + # Create a new project in a specified target directory + kusion project create --target /dir/to/projects/my-project +``` + +### Options + +``` + -h, --help help for create + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-project.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-project.md new file mode 100644 index 00000000..3da7737c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-project.md @@ -0,0 +1,33 @@ +# kusion project + +Project is a folder that contains a project.yaml file and is linked to a Git repository + +### Synopsis + +Project in Kusion is defined as any folder that contains a project.yaml file and is linked to a Git repository. + + Project organizes logical configurations for internal components to orchestrate the application and assembles them to suit different roles, such as developers and platform engineers. + +``` +kusion project [flags] +``` + +### Options + +``` + -h, --help help for project +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion project create](kusion-project-create.md) - Create a new project + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-stack-create.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-stack-create.md new file mode 100644 index 00000000..934e3815 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-stack-create.md @@ -0,0 +1,49 @@ +# kusion stack create + +Create a new stack + +### Synopsis + +This command creates a new stack under the target directory which by default is the current working directory. + + The stack folder to be created contains 'stack.yaml', 'kcl.mod' and 'main.k' with the specified values. + + Note that the target directory needs to be a valid project directory with project.yaml file + +``` +kusion stack create +``` + +### Examples + +``` + # Create a new stack at current project directory + kusion stack create dev + + # Create a new stack in a specified target project directory + kusion stack create dev --target /dir/to/projects/my-project + + # Create a new stack copied from the referenced stack under the target project directory + kusion stack create prod --copy-from dev +``` + +### Options + +``` + --copy-from string specify the referenced stack path to copy from + -h, --help help for create + -t, --target string specify the target project directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-stack.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-stack.md new file mode 100644 index 00000000..edc6d2bf --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-stack.md @@ -0,0 +1,33 @@ +# kusion stack + +Stack is a folder that contains a stack.yaml file within the corresponding project directory + +### Synopsis + +Stack in Kusion is defined as any folder that contains a stack.yaml file within the corresponding project directory. + + A stack provides a mechanism to isolate multiple deployments of the same application, serving with the target workspace to which an application will be deployed. + +``` +kusion stack [flags] +``` + +### Options + +``` + -h, --help help for stack +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion stack create](kusion-stack-create.md) - Create a new stack + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-version.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-version.md new file mode 100644 index 00000000..8a5aff3e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-version.md @@ -0,0 +1,38 @@ +# kusion version + +Print the Kusion version information for the current context + +### Synopsis + +Print the Kusion version information for the current context + +``` +kusion version [flags] +``` + +### Examples + +``` + # Print the Kusion version + kusion version +``` + +### Options + +``` + -h, --help help for version + -o, --output string Output format. Only json format is supported for now +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-create.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-create.md new file mode 100644 index 00000000..e4ddf333 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-create.md @@ -0,0 +1,46 @@ +# kusion workspace create + +Create a new workspace + +### Synopsis + +This command creates a workspace with specified name and configuration file, where the file must be in the YAML format. + +``` +kusion workspace create +``` + +### Examples + +``` + # Create a workspace + kusion workspace create dev -f dev.yaml + + # Create a workspace and set as current + kusion workspace create dev -f dev.yaml --current + + # Create a workspace in a specified backend + kusion workspace create prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-delete.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-delete.md new file mode 100644 index 00000000..933e008f --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-delete.md @@ -0,0 +1,44 @@ +# kusion workspace delete + +Delete a workspace + +### Synopsis + +This command deletes the current or a specified workspace. + +``` +kusion workspace delete +``` + +### Examples + +``` + # Delete the current workspace + kusion workspace delete + + # Delete a specified workspace + kusion workspace delete dev + + # Delete a specified workspace in a specified backend + kusion workspace delete prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-list.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-list.md new file mode 100644 index 00000000..202bd49d --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-list.md @@ -0,0 +1,41 @@ +# kusion workspace list + +List all workspace names + +### Synopsis + +This command list the names of all workspaces. + +``` +kusion workspace list +``` + +### Examples + +``` + # List all workspace names + kusion workspace list + + # List all workspace names in a specified backend + kusion workspace list --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-show.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-show.md new file mode 100644 index 00000000..dc558f4f --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-show.md @@ -0,0 +1,44 @@ +# kusion workspace show + +Show a workspace configuration + +### Synopsis + +This command gets the current or a specified workspace configuration. 
+ +``` +kusion workspace show +``` + +### Examples + +``` + # Show current workspace configuration + kusion workspace show + + # Show a specified workspace configuration + kusion workspace show dev + + # Show a specified workspace in a specified backend + kusion workspace show prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for show +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-switch.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-switch.md new file mode 100644 index 00000000..376c69d2 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-switch.md @@ -0,0 +1,41 @@ +# kusion workspace switch + +Switch the current workspace + +### Synopsis + +This command switches the workspace, where the workspace must be created. + +``` +kusion workspace switch +``` + +### Examples + +``` + # Switch the current workspace + kusion workspace switch dev + + # Switch the current workspace in a specified backend + kusion workspace switch prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for switch +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-update.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-update.md new file mode 100644 index 00000000..1c404664 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace-update.md @@ -0,0 +1,46 @@ +# kusion workspace update + +Update a workspace configuration + +### Synopsis + +This command updates a workspace configuration with specified configuration file, where the file must be in the YAML format. + +``` +kusion workspace update +``` + +### Examples + +``` + # Update the current workspace + kusion workspace update -f dev.yaml + + # Update a specified workspace and set as current + kusion workspace update dev -f dev.yaml --current + + # Update a specified workspace in a specified backend + kusion workspace update prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace.md b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace.md new file mode 100644 index 00000000..8a84d511 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/1-commands/kusion-workspace.md @@ -0,0 +1,38 @@ +# kusion workspace + +Workspace is a logical concept representing a target that stacks will be deployed to + +### Synopsis + +Workspace is a logical concept representing a target that stacks will be deployed to. + + Workspace is managed by platform engineers, which contains a set of configurations that application developers do not want or should not concern, and is reused by multiple stacks belonging to different projects. + +``` +kusion workspace [flags] +``` + +### Options + +``` + -h, --help help for workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion workspace create](kusion-workspace-create.md) - Create a new workspace +* [kusion workspace delete](kusion-workspace-delete.md) - Delete a workspace +* [kusion workspace list](kusion-workspace-list.md) - List all workspace names +* [kusion workspace show](kusion-workspace-show.md) - Show a workspace configuration +* [kusion workspace switch](kusion-workspace-switch.md) - Switch the current workspace +* [kusion workspace update](kusion-workspace-update.md) - Update a workspace configuration + +###### Auto generated by spf13/cobra on 12-Jun-2024 diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/_category_.json b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/_category_.json new file mode 100644 index 00000000..0df3bade --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Schemas" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/app-configuration.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/app-configuration.md new file mode 100644 index 00000000..6808bee7 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/app-configuration.md @@ -0,0 +1,35 @@ +# appconfiguration + +## Schema AppConfiguration + +AppConfiguration is a developer-centric definition that describes how to run an Application.
This application model builds upon a decade of experience at AntGroup running a super-large-scale
internal developer platform, combined with best-of-breed ideas and practices from the community. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**accessories**|{str:any}|Accessories defines a collection of accessories that will be attached to the workload.|{}| +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to resources.|{}| +|**labels**|{str:str}|Labels can be used to attach arbitrary metadata as key-value pairs to resources.|{}| +|**workload** `required`|[service.Service](workload/service#schema-service) \| [wl.Job](workload/job#schema-job) |Workload defines how to run your application code. Currently supported workload profile
includes Service and Job.|N/A| + +### Examples +```python +# Instantiate an App with a long-running service whose image is "nginx:v1" + +import kam as ac +import kam.workload as wl +import kam.workload.container as c + +helloworld : ac.AppConfiguration { + workload: wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/database/mysql.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/database/mysql.md new file mode 100644 index 00000000..8f6135bb --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/database/mysql.md @@ -0,0 +1,39 @@ +# mysql + +## Schema MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider
managed mysql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" | "cloud"|Type defines whether the mysql database is deployed locally or provided by
cloud vendor.|| +|**version** `required`|str|Version defines the mysql version to use.|| + +### Examples +```python +# Instantiate a local mysql database with version of 8.0. + +import mysql + +accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of mysql](../../2-workspace-configs/database/mysql.md), and Kusion will automatically concatenate the ``, ``, `` and `mysql` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`.
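For example, an application could read these injected credentials directly from its environment at runtime. The snippet below is only an illustrative sketch: the `MY_APP_MYSQL` suffix is a hypothetical placeholder, since the real suffix comes from the generated `databaseName` described above.

```python
# Hypothetical sketch: read the credentials Kusion injects into the container.
# "MY_APP_MYSQL" is a placeholder suffix; the real one is the generated
# databaseName, uppercased and with "-" replaced by "_".
import os

db_host = os.environ.get("KUSION_DB_HOST_MY_APP_MYSQL")
db_user = os.environ.get("KUSION_DB_USERNAME_MY_APP_MYSQL")
db_pass = os.environ.get("KUSION_DB_PASSWORD_MY_APP_MYSQL")
```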
diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/database/postgres.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/database/postgres.md new file mode 100644 index 00000000..ad8cbb7e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/database/postgres.md @@ -0,0 +1,39 @@ +# postgres + +## Schema PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider
managed postgresql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" | "cloud"|Type defines whether the postgresql database is deployed locally or provided by
cloud vendor.|| +|**version** `required`|str|Version defines the postgres version to use.|| + +### Examples +```python +#Instantiate a local postgresql database with image version of 14.0. + +import postgres as postgres + +accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of postgres](../../2-workspace-configs/database/postgres.md), and Kusion will automatically concatenate the ``, ``, `` and `postgres` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`. diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/common.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/common.md new file mode 100644 index 00000000..8b649196 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/common.md @@ -0,0 +1,17 @@ +# common + +## Schema WorkloadBase + +WorkloadBase defines set of attributes shared by different workload profile, e.g Service
and Job. You can inherit this Schema to reuse these common attributes. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[s.Secret](#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. passwords or tokens.|| + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/container.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/container.md new file mode 100644 index 00000000..ce170fc6 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/container.md @@ -0,0 +1,63 @@ +# container + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter, 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
The container runtime's default will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret or configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md new file mode 100644 index 00000000..91123526 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md @@ -0,0 +1,29 @@ +# lifecycle + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| + +### Examples +```python +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md new file mode 100644 index 00000000..64d709cd --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md @@ -0,0 +1,92 @@ +# probe + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) | [Http](#http) | [Tcp](#tcp)|The action taken to determine the liveness or health of a container.|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds to wait before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| + +### Examples +```python +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + url: "http://localhost:80/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| + +### Examples +```python +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP GET requests. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request.|| +|**url** `required`|str|The fully qualified URL to send HTTP requests to.|| + +### Examples +```python +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The fully qualified URL to open a socket.|| + +### Examples +```python +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md new file mode 100644 index 00000000..1f13bb85 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md @@ -0,0 +1,29 @@ +# secret + +## Schema Secret + +Secrets are used to provide data that is considered sensitive like passwords, API keys,
TLS certificates, tokens or other credentials. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md new file mode 100644 index 00000000..3e749af9 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md @@ -0,0 +1,30 @@ +# k8s_manifest + +## Schema K8sManifest + +K8sManifest defines the paths of the YAML files, or the directories of the raw Kubernetes manifests, which will be jointly appended to the Resources of Spec. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**paths** `required`|[str]|The paths of the YAML files, or the directories of the raw Kubernetes manifests.|| + +### Examples + +``` +import k8s_manifest + +accessories: { + "k8s_manifest": k8s_manifest.K8sManifest { + paths: [ + # The path of a YAML file. + "/path/to/my/k8s_manifest.yaml", + # The path of a directory containing K8s manifests. + "/dir/to/my/k8s_manifests" + ] + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md new file mode 100644 index 00000000..bf2e551e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md @@ -0,0 +1,24 @@ +# prometheus + +## Schema Prometheus + +Prometheus can be used to define monitoring requirements + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**path**|str|The path to scrape metrics from.|"/metrics"| +|**port**|str|The port to scrape metrics from. When using Prometheus operator, this needs to be the port NAME. Otherwise, this can be a port name or a number.|container ports when scraping pod (monitorType is pod) and service port when scraping service (monitorType is service)| + +### Examples +```python +import monitoring as m + +"monitoring": m.Prometheus { + path: "/metrics" + port: "web" +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/network/network.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/network/network.md new file mode 100644 index 00000000..daa33121 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/network/network.md @@ -0,0 +1,51 @@ +# network + +## Schema Network + +Network defines the exposed port of Service, which can be used to describe how the Service
get accessed. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**ports**|[[Port](#schema-port)]|The list of ports which the Workload should get exposed.|| + +### Examples +```python +import network as n + +"network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] +} +``` + +## Schema Port + +Port defines the exposed port of Workload, which can be used to describe how the Workload get accessed. + +| name | type | description | default value | +| --- | --- | --- | --- | +|**port** `required`|int|The exposed port of the Workload.|80| +|**protocol** `required`|"TCP" | "UDP"|The protocol to access the port.|"TCP"| +|**public** `required`|bool|Public defines whether the port can be accessed through Internet.|False| +|**targetPort**|int|The backend container port. If empty, set it the same as the port.|| + +### Examples + +```python +import network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md new file mode 100644 index 00000000..8313090a --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md @@ -0,0 +1,35 @@ +# opsrule + +## Schema OpsRule + +OpsRule describes operation rules for various Day-2 Operations. Once declared, these
operation rules will be checked before any Day-2 operations. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**maxUnavailable**|int | str|The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|"25%"| + +```python +import opsrule as o +import kam.v1.app_configuration +import kam.v1.workload as wl +import kam.v1.workload.container as c + +helloworld : ac.AppConfiguration { + workload: service.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + } + accessories: { + "opsrule" : o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/workload/job.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/workload/job.md new file mode 100644 index 00000000..52194488 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/workload/job.md @@ -0,0 +1,251 @@ +# job + +## Schemas +- [Job](#schema-job) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Job + +Job is a kind of workload profile that describes how to run your application code. This
is typically used for tasks that take from a few seconds to a few days to complete. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:[Container](../internal/container#schema-container)}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**schedule** `required`|str|The scheduling strategy in Cron format. More info: https://en.wikipedia.org/wiki/Cron.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. passwords or tokens.|| + +### Examples +```python +# Instantiate a job with the busybox image that runs every hour + +import kam.workload as wl +import kam.workload.container as c + +echoJob : wl.Job { + containers: { + "busybox": c.Container{ + image: "busybox:1.28" + command: ["/bin/sh", "-c", "echo hello"] + } + } + schedule: "0 * * * *" +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mount into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) \| [Http](#http) \| [Tcp](#tcp)|The action taken to determine the liveness or health of a container.|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/workload/service.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/workload/service.md new file mode 100644 index 00000000..8dc74ccf --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/1-developer-schemas/workload/service.md @@ -0,0 +1,248 @@ +# service + +## Schemas +- [Service](#schema-service) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Service + +Service is a kind of workload profile that describes how to run your application code. This
is typically used for long-running web applications that should "never" go down, and handle
short-lived, latency-sensitive web requests or events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:[Container](../internal/container#schema-container)}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. password, token.|| + +### Examples +```python +# Instantiate a long-running service with the image "nginx:v1" + +import kam.workload as wl +import kam.workload.container as c + +nginxSvc : wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mount into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) \| [Http](#http) \| [Tcp](#tcp)|The action taken to determine the liveness or health of a container.|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/_category_.json b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/_category_.json new file mode 100644 index 00000000..81444988 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Workspace Configs" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/database/mysql.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/database/mysql.md new file mode 100644 index 00000000..66225f5b --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/database/mysql.md @@ -0,0 +1,52 @@ +# mysql + +## Module MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider managed mysql database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the mysql database. |str|"root"|optional| +|**category**
Category specifies the edition of the mysql instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the mysql instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the host address of the cloud mysql instance for the workload to connect with is via public network or private network of the cloud vendor. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the mysql instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud mysql instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# MySQL workspace configs for AWS RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "my-mysql" +``` + +```yaml +# MySQL workspace configs for Alicloud RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "my-mysql" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/database/postgres.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/database/postgres.md new file mode 100644 index 00000000..aed20616 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/database/postgres.md @@ -0,0 +1,55 @@ +# postgres + +## Module PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider managed postgres database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the postgres database. |str|"root"|optional| +|**category**
Category specifies the edition of the postgres instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the postgres instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the host address of the cloud postgres instance for the workload to connect with is via public network or private network of the cloud vendor. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the postgres instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud postgres instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# PostgreSQL workspace configs for AWS RDS +modules: + postgres: + path: oci://ghcr.io/kusionstack/postgres + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` + +```yaml +# PostgreSQL workspace configs for Alicloud RDS +modules: + postgres: + path: oci://ghcr.io/kusionstack/postgres + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md new file mode 100644 index 00000000..c741c7f6 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md @@ -0,0 +1,26 @@ +# k8s_manifest + +## Module K8sManifest + +K8sManifest defines the paths of the YAML files, or the directories of the raw Kubernetes manifests, which will be jointly appended to the Resources of Spec. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**paths**
The paths of the YAML files, or the directories of the raw Kubernetes manifests. |[str]|Undefined|**optional**| + +### Examples + +```yaml +modules: + k8s_manifest: + path: oci://ghcr.io/kusionstack/k8s_manifest + version: 0.1.0 + configs: + default: + paths: + # The default paths to apply for the raw K8s manifest YAML files. + - /path/to/k8s_manifest.yaml + - /dir/to/k8s_manifest/ +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md new file mode 100644 index 00000000..55628423 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md @@ -0,0 +1,43 @@ +# monitoring + +`monitoring` can be used to define workspace-level monitoring configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**operatorMode**
Whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages.|true \| false|false|optional| +|**monitorType**
The kind of monitor to create. It only applies when operatorMode is set to True.|"Service" \| "Pod"|"Service"|optional| +|**interval**
The time interval at which Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping interval can only be set in the scraping job configuration, which Kusion does not have permission to manage directly.|str|30s|optional| +|**timeout**
The timeout when Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping timeout can only be set in the scraping job configuration, which Kusion does not have permission to manage directly.|str|15s|optional| +|**scheme**
The scheme to scrape metrics from. Possible values are http and https.|"http" \| "https"|http|optional| + +### Examples +```yaml +modules: + monitoring: + path: oci://ghcr.io/kusionstack/monitoring + version: 0.2.0 + configs: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + timeout: 1m + projectSelector: + - foo + - bar + high_frequency: + monitorType: Service + interval: 10s + timeout: 5s + projectSelector: + - helloworld + - wordpress + - prometheus-sample-app +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/networking/network.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/networking/network.md new file mode 100644 index 00000000..05609acc --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/networking/network.md @@ -0,0 +1,26 @@ +# network + +`network` can be used to define workspace-level networking configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
The specific cloud vendor that provides the load balancer.| "alicloud" \| "aws"|Undefined|**required**| +| **labels**
The attached labels of the port.|{str:str}|Undefined|optional| +| **annotations**
The attached annotations of the port.|{str:str}|Undefined|optional| + +### Examples + +```yaml +modules: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md new file mode 100644 index 00000000..0c3d29c1 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md @@ -0,0 +1,22 @@ +# opsrule + +`opsrule` can be used to define workspace-level operational rule configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**maxUnavailable**
The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|int \| str|Undefined|optional| + + +### Examples + +```yaml +modules: + opsrule: + path: oci://ghcr.io/kusionstack/opsrule + version: 0.2.0 + configs: + default: + maxUnavailable: "40%" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/workload/job.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/workload/job.md new file mode 100644 index 00000000..da659136 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/workload/job.md @@ -0,0 +1,26 @@ +# job + +`job` can be used to define workspace-level job configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | + +### Examples +```yaml +modules: + job: + path: oci://ghcr.io/kusionstack/job + version: 0.1.0 + configs: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/workload/service.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/workload/service.md new file mode 100644 index 00000000..9c76a44c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/2-workspace-configs/workload/service.md @@ -0,0 +1,28 @@ +# service + +`service` can be used to define workspace-level service configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | +| **type**
Type represents the type of workload used by this Service. Currently, it supports several
types, including Deployment and CollaSet. |"Deployment" \| "CollaSet"| Deployment |**required**| + +### Examples +```yaml +modules: + service: + path: oci://ghcr.io/kusionstack/service + version: 0.2.0 + configs: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/3-naming-conventions.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/3-naming-conventions.md new file mode 100644 index 00000000..ab7f668c --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/3-naming-conventions.md @@ -0,0 +1,34 @@ +--- +id: naming-conventions +sidebar_label: Resource Naming Conventions +--- + +# Resource Naming Conventions + +Kusion will automatically create Kubernetes or Terraform resources for the applications, many of which do not require users' awareness. This document will introduce the naming conventions for these related resources. + +## Kubernetes Resources + +Kusion adheres to specific rules when generating the Kubernetes resources for users' applications. The table below lists some common Kubernetes resource naming conventions. Note that `Namespace` can now be specified by users. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| Namespace | `` | v1:Namespace:wordpress-local-db | +| Deployment | ``-``-`` | apps/v1:Deployment:wordpress-local-db:wordpress-local-db-dev-wordpress | +| CronJob | ``-``-`` | batch/v1:CronJob:helloworld:helloworld-dev-helloworld | +| Service | ``-``-``-` or ` | v1:Service:helloworld:helloworld-dev-helloworld-public | + +## Terraform Resources + +Similarly, Kusion also adheres to specific naming conventions when generating the Terraform Resources. Some common resources are listed below. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| random_password | ``-`` | hashicorp:random:random_password:wordpress-db-mysql | +| aws_security_group | ``-`` | hashicorp:aws:aws_security_group:wordpress-db-mysql | +| aws_db_instance | `` | hashicorp:aws:aws_db_instance:wordpress-db | +| alicloud_db_instance | `` | aliyun:alicloud:alicloud_db_instance:wordpress-db | +| alicloud_db_connection | `` | aliyun:alicloud:alicloud_db_connection:wordpress | +| alicloud_rds_account | `` | aliyun:alicloud:alicloud_rds_account:wordpress | + +The `` is composed of two parts, one of which is the `key` of database declared in `AppConfiguration` and the other is the `suffix` declared in `workspace` configuration. Kusion will concatenate the database key and suffix, convert them to uppercase, and replace `-` with `_`. And the `` supported now includes `mysql` and `postgres`. 
diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/_category_.json b/docs_versioned_docs/version-v0.12/6-reference/2-modules/_category_.json new file mode 100644 index 00000000..4dadaa75 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Modules" +} diff --git a/docs_versioned_docs/version-v0.12/6-reference/2-modules/index.md b/docs_versioned_docs/version-v0.12/6-reference/2-modules/index.md new file mode 100644 index 00000000..744892c4 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/2-modules/index.md @@ -0,0 +1,45 @@ +# Kusion Modules + +KusionStack presets application configuration models described by KCL, where the model is called **Kusion Model**. The GitHub repository [KusionStack/catalog](https://github.com/KusionStack/catalog) is used to store these models, which is known as **Kusion Model Library**. + +The original intention of designing Kusion Model is to enhance the efficiency and improve the experience of YAML users. Through the unified application model defined by code, abstract and encapsulate complex configuration items, omit repetitive and derivable configurations, and supplement with necessary verification logic. Only the necessary attributes get exposed, users get an out-of-the-box, easy-to-understand configuration interface, which reduces the difficulty and improves the reliability of the configuration work. + +Kusion Model Library currently provides the Kusion Model `AppConfiguration`. The design of `AppConfiguration` is developer-centric, based on Ant Group's decades of practice in building and managing hyperscale IDP (Internal Developer Platform), and the best practice of community. `AppConfiguration` describes the full lifecycle of an application. + +A simple example of using `AppConfiguration` to describe an application is as follows: + +```bash +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + "wordpress": c.Container { + image: "wordpress:latest" + env: { + "WORDPRESS_DB_HOST": "secret://wordpress-db/hostAddress" + "WORDPRESS_DB_PASSWORD": "secret://wordpress-db/password" + } + resources: { + "cpu": "1" + "memory": "2Gi" + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "5.7" + size: 20 + instanceType: "mysql.n2.serverless.1c" + category: "serverless_basic" + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/6-reference/3-roadmap.md b/docs_versioned_docs/version-v0.12/6-reference/3-roadmap.md new file mode 100644 index 00000000..f411009e --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/3-roadmap.md @@ -0,0 +1,15 @@ +# Roadmap + +For a finer-grained view of our roadmap and what is being worked on for a release, please refer to the [Roadmap](https://github.com/orgs/KusionStack/projects/24) + +## Expand Kusion Module Ecosystem to meet more scenarios + +We plan to expand the range of Kusion modules. This includes not only cloud services but also popular cloud-native projects such as Prometheus, Backstage, Crossplane, etc. By leveraging the ecosystem of CNCF projects and Terraform providers, we aim to enrich the Kusion module ecosystem to meet more scenarios. + +## LLM (Large Language Models) Operation + +Kusion is essentially designed to tackle team collaboration challenges. The LLM operations also involve many collaborative tasks. 
We believe Kusion can boost the operational efficiency of LLM engineers in this setting as well. + +## Kusion Server + +Currently, Kusion is a command-line tool, which has its pros and cons. Through our discussions with community users, we‘ve discovered that some of them prefer a long-running service with a web portal. We’re planning to build this form of Kusion, and have already started developing some features. diff --git a/docs_versioned_docs/version-v0.12/6-reference/_category_.json b/docs_versioned_docs/version-v0.12/6-reference/_category_.json new file mode 100644 index 00000000..a3b4dd92 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/6-reference/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Reference" +} diff --git a/docs_versioned_docs/version-v0.12/7-faq/1-install-error.md b/docs_versioned_docs/version-v0.12/7-faq/1-install-error.md new file mode 100644 index 00000000..a0fde76a --- /dev/null +++ b/docs_versioned_docs/version-v0.12/7-faq/1-install-error.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +--- + +# Installation + +## 1. Could not find `libintl.dylib` + +This problem is that some tools depends on the `Gettext` library, but macOS does not have this library by default. You can try to solve it in the following ways: + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. `brew install gettext` +3. Make sure `libintl.8.dylib` exists in `/usr/local/opt/gettext/lib` directory +4. If brew is installed in another directory, the library can be created by copying it to the corresponding directory + +## 2. macOS system SSL related errors + +Openssl dylib library not found or SSL module is not available problem + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. Install openssl (version 1.1) via brew + +``` +brew install openssl@1.1 +``` diff --git a/docs_versioned_docs/version-v0.12/7-faq/2-kcl.md b/docs_versioned_docs/version-v0.12/7-faq/2-kcl.md new file mode 100644 index 00000000..596aa881 --- /dev/null +++ b/docs_versioned_docs/version-v0.12/7-faq/2-kcl.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 2 +--- + +# KCL + +Visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl) for more documents. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.12/7-faq/_category_.json b/docs_versioned_docs/version-v0.12/7-faq/_category_.json new file mode 100644 index 00000000..7c4b229f --- /dev/null +++ b/docs_versioned_docs/version-v0.12/7-faq/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "FAQ" +} diff --git a/docs_versioned_docs/version-v0.13/1-what-is-kusion/1-overview.md b/docs_versioned_docs/version-v0.13/1-what-is-kusion/1-overview.md new file mode 100644 index 00000000..bbbc5fbb --- /dev/null +++ b/docs_versioned_docs/version-v0.13/1-what-is-kusion/1-overview.md @@ -0,0 +1,62 @@ +--- +id: overview +title: Overview +slug: / +--- + +# Overview + +Welcome to Kusion! 
This introduction section covers what Kusion is, the Kusion workflow, and how Kusion compares to other software. If you just want to dive into using Kusion, feel free to skip ahead to the [Getting Started](getting-started/install-kusion) section. + +## What is Kusion? + +Kusion is an intent-driven [Platform Orchestrator](https://internaldeveloperplatform.org/platform-orchestrators/), which sits at the core of an [Internal Developer Platform (IDP)](https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/). With Kusion you can enable app-centric development, your developers only need to write a single application specification - [AppConfiguration](https://www.kusionstack.io/docs/concepts/app-configuration). [AppConfiguration](https://www.kusionstack.io/docs/concepts/app-configuration) defines the workload and all resource dependencies without needing to supply environment-specific values, Kusion ensures it provides everything needed for the application to run. + +Kusion helps app developers who are responsible for creating applications and the platform engineers responsible for maintaining the infrastructure the applications run on. These roles may overlap or align differently in your organization, but Kusion is intended to ease the workload for any practitioner responsible for either set of tasks. + +![arch](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/overview.jpg) + + +## How does Kusion work? + +As a Platform Orchestrator, Kusion enables you to address challenges often associated with Day 0 and Day 1. Both platform engineers and application engineers can benefit from Kusion. + +There are two key workflows for Kusion: + +1. **Day 0 - Set up the modules and workspaces:** Platform engineers create shared modules for deploying applications and their underlying infrastructure, and workspace definitions for target landing zone. These standardized, shared modules codify the requirements from stakeholders across the organization including security, compliance, and finance. + + Kusion modules abstract the complexity of underlying infrastructure tooling, enabling app developers to deploy their applications using a self-service model. + +
+ + ![workflow](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/platform_workflow.jpg) +
+ +2. **Day 1 - Set up the application:** Application developers leverage the workspaces and modules created by the platform engineers to deploy applications and their supporting infrastructure. The platform team maintains the workspaces and modules, which allows application developers to focus on building applications using a repeatable process on standardized infrastructure. + +
+ + ![workflow](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/app_workflow.jpg) +
+ +## Kusion Highlights + +* **Platform as Code** + + Specify desired application intent through declarative configuration code, drive continuous deployment with any CI/CD systems or GitOps to match that intent. No ad-hoc scripts, no hard maintain custom workflows, just declarative configuration code. + +* **Dynamic Configuration Management** + + Enable platform teams to set baseline-templates, control how and where to deploy application workloads and provision accessory resources. While still enabling application developers freedom via workload-centric specification and deployment. + +* **Security & Compliance Built In** + + Enforce security and infrastructure best practices with out-of-box [base models](https://github.com/KusionStack/catalog), create security and compliance guardrails for any Kusion deploy with third-party Policy as Code tools. All accessory resource secrets are automatically injected into Workloads. + +* **Lightweight and Open Model Ecosystem** + + Pure client-side solution ensures good portability and the rich APIs make it easier to integrate and automate. Large growing model ecosystem covers all stages in application lifecycle, with extensive connections to various infrastructure capabilities. + +:::tip + +**Kusion is an early project.** The end goal of Kusion is to boost [Internal Developer Platform](https://internaldeveloperplatform.org/) revolution, and we are iterating on Kusion quickly to strive towards this goal. For any help or feedback, please contact us in [Slack](https://github.com/KusionStack/community/discussions/categories/meeting) or [issues](https://github.com/KusionStack/kusion/issues). diff --git a/docs_versioned_docs/version-v0.13/1-what-is-kusion/2-kusion-vs-x.md b/docs_versioned_docs/version-v0.13/1-what-is-kusion/2-kusion-vs-x.md new file mode 100644 index 00000000..a5ed333d --- /dev/null +++ b/docs_versioned_docs/version-v0.13/1-what-is-kusion/2-kusion-vs-x.md @@ -0,0 +1,37 @@ +--- +id: kusion-vs-x +--- + +# Kusion vs Other Software + +It can be difficult to understand how different software compare to each other. Is one a replacement for the other? Are they complementary? etc. In this section, we compare Kusion to other software. + +**vs. GitOps (ArgoCD, FluxCD, etc.)** + +According to the [open GitOps principles](https://opengitops.dev/), GitOps systems typically have its desired state expressed declaratively, continuously observe actual system state and attempt to apply the desired state. In the design of Kusion toolchain, we refer to those principles but have no intention to reinvent any GitOps systems wheel. + +Kusion adopts your GitOps process and improves it with richness of features. The declarative [AppConfiguration](../concepts/app-configuration) model can be used to express desired intent, once intent is declared [Kusion CLI](../reference/commands) takes the role to make production match intent as safely as possible. + +**vs. PaaS (Heroku, Vercel, etc.)** + +Kusion shares the same goal with traditional PaaS platforms to provide application delivery and management capabilities. The intuitive difference from the full functionality PaaS platforms is that Kusion is a client-side toolchain, not a complete PaaS platform. + +Also traditional PaaS platforms typically constrain the type of applications they can run but there is no such constrain for Kusion which means Kusion provides greater flexibility. + +Kusion allows you to have platform-like features without the constraints of a traditional PaaS. 
However, Kusion is not attempting to replace any PaaS platforms, instead Kusion can be used to deploy to a platform such as Heroku. + +**vs. KubeVela** + +KubeVela is a modern software delivery and management control plane which makes it easier to deploy and operate applications on top of Kubernetes. + +Although some might initially perceive an overlap between Kusion and KubeVela, they are in fact complementary and can be integrated to work together. As a lightweight, purely client-side tool, coupled with corresponding [Generator](https://github.com/KusionStack/kusion-module-framework) implementation, Kusion can render [AppConfiguration](../concepts/app-configuration) schema to generate CRD resources for KubeVela and leverage KubeVela's control plane to implement application delivery. + +**vs. Helm** + +The concept of Helm originates from the [package management](https://en.wikipedia.org/wiki/Package_manager) mechanism of the operating system. It is a package management tool based on templated YAML files and supports the execution and management of resources in the package. + +Kusion is not a package manager. Kusion naturally provides a superset of Helm capabilities with the modeled key-value pairs, so that developers can use Kusion directly as a programable alternative to avoid the pain of writing text templates. For users who have adopted Helm, the stack compilation results in Kusion can be packaged and used in Helm format. + +**vs. Kubernetes** + +Kubernetes(K8s) is a container scheduling and management runtime widely used around the world, an "operating system" core for containers, and a platform for building platforms. Above the Kubernetes API layer, Kusion aims to provide app-centric **abstraction** and unified **workspace**, better **user experience** and automation **workflow**, and helps developers build the app delivery model easily and collaboratively. diff --git a/docs_versioned_docs/version-v0.13/1-what-is-kusion/_category_.json b/docs_versioned_docs/version-v0.13/1-what-is-kusion/_category_.json new file mode 100644 index 00000000..0817eb90 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/1-what-is-kusion/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "What is Kusion?" +} diff --git a/docs_versioned_docs/version-v0.13/2-getting-started/1-install-kusion.md b/docs_versioned_docs/version-v0.13/2-getting-started/1-install-kusion.md new file mode 100644 index 00000000..540881d6 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/2-getting-started/1-install-kusion.md @@ -0,0 +1,144 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Install Kusion + +You can install the latest Kusion CLI on MacOS, Linux and Windows. + +## MacOs/Linux + +For the MacOs and Linux, Homebrew and sh script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on MacOS and Linux is to use the brew package manager. 
+ +**Install Kusion** + +```bash +# tap formula repository Kusionstack/tap +brew tap KusionStack/tap + +# install Kusion +brew install KusionStack/tap/kusion +``` + +**Update Kusion** + +```bash +# update formulae from remote +brew update + +# update Kusion +brew upgrade KusionStack/tap/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/tap/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +curl https://www.kusionstack.io/scripts/install.sh | sh +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, 0.10.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.11.0 +curl https://www.kusionstack.io/scripts/install.sh | sh -s 0.11.0 +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +curl https://www.kusionstack.io/scripts/uninstall.sh | sh +``` + +```mdx-code-block + + +``` + +## Windows + +For the Windows, Scoop and Powershell script are supported. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on Windows is to use the scoop package manager. + +**Install Kusion** + +```bash +# add scoop bucket KusionStack +scoop bucket add KusionStack https://github.com/KusionStack/scoop-bucket.git + +# install kusion +scoop install KusionStack/kusion +``` + +**Update Kusion** + +```bash +# update manifest from remote +scoop update + +# update Kusion +scoop install KusionStack/kusion +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +brew uninstall KusionStack/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +# install Kusion, default latest version +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/install.ps1 | iex" +``` + +**Install the Specified Version of Kusion** + +You can also install the specified version of Kusion by appointing the version as shell script parameter, where the version is the [available tag](https://github.com/KusionStack/kusion/tags) trimming prefix "v", such as 0.11.0, etc. In general, you don't need to specify Kusion version, just use the command above to install the latest version. + +```bash +# install Kusion of specified version 0.10.0 +powershell {"& { $(irm https://www.kusionstack.io/scripts/install.ps1) } -Version 0.11.0" | iex} +``` + +**Uninstall Kusion** + +```bash +# uninstall Kusion +powershell -Command "iwr -useb https://www.kusionstack.io/scripts/uninstall.ps1 | iex" +``` + +```mdx-code-block + + +``` diff --git a/docs_versioned_docs/version-v0.13/2-getting-started/2-deliver-quickstart.md b/docs_versioned_docs/version-v0.13/2-getting-started/2-deliver-quickstart.md new file mode 100644 index 00000000..7b89b4fa --- /dev/null +++ b/docs_versioned_docs/version-v0.13/2-getting-started/2-deliver-quickstart.md @@ -0,0 +1,221 @@ +--- +id: deliver-quickstart +--- + +# Run Your First App on Kubernetes with Kusion + +In this tutorial, we will walk through how to deploy a quickstart application on Kubernetes with Kusion. The demo application can interact with a locally deployed MySQL database, which is declared as an accessory in the config codes and will be automatically created and managed by Kusion. 
+ +## Prerequisites + +Before we start to play with this example, we need to have the Kusion CLI installed and run an accessible Kubernetes cluster. Here are some helpful documents: + +- Install [Kusion CLI](./1-install-kusion.md). +- Run a [Kubernetes](https://kubernetes.io) cluster. Some light and convenient options for Kubernetes local deployment include [k3s](https://docs.k3s.io/quick-start), [k3d](https://k3d.io/v5.4.4/#installation), and [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node). + +## Initialize Project + +We can start by initializing this tutorial project with `kusion init` cmd. + +```shell +# Create a new directory and navigate into it. +mkdir quickstart && cd quickstart + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── default +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the project and stack structure can be found in [Project](../3-concepts/1-project/1-overview.md) and [Stack](../3-concepts/2-stack/1-overview.md). +::: + +### Review Configuration Files + +Now let's have a glance at the configuration codes of `default` stack: + +```shell +cat default/main.k +``` + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares the customized configuration codes for default stack. +quickstart: ac.AppConfiguration { + workload: service.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + } +} +``` + +The configuration file `main.k`, usually written by the **App Developers**, declares the customized configuration codes for `default` stack, including an `AppConfiguration` instance with the name of `quickstart`. The `quickstart` application consists of a `Workload` with the type of `service.Service`, which runs a container named `quickstart` using the image of `kusionstack/kusion-quickstart:latest`. + +Besides, it declares a **Kusion Module** with the type of `network.Network`, exposing `8080` port to be accessed for the long-running service. + +The `AppConfiguration` model can hide the major complexity of Kubernetes resources such as `Namespace`, `Deployment`, and `Service` which will be created and managed by Kusion, providing the concepts that are **application-centric** and **infrastructure-agnostic** for a more developer-friendly experience. + +:::info +More details about the `AppConfiguration` model and built-in Kusion Module can be found in [kam](https://github.com/KusionStack/kam) and [catalog](https://github.com/KusionStack/catalog). +::: + +The declaration of the dependency packages can be found in `default/kcl.mod`: + +```shell +cat default/kcl.mod +``` + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +``` + +:::info +More details about the application model and module dependency declaration can be found in [Kusion Module guide for app dev](../3-concepts/3-module/3-app-dev-guide.md). +::: + +:::tip +The specific module versions we used in the above demonstration is only applicable for Kusion CLI after **v0.12.0**. 
+::: + +## Application Delivery + +Use the following command to deliver the quickstart application in `default` stack on your accessible Kubernetes cluster, while watching the resource creation and automatically port-forwarding the specified port (8080) from local to the Kubernetes Service of the application. We can check the details of the resource preview results before we confirm to apply the diffs. + +```shell +cd default && kusion apply --port-forward 8080 +``` + +![](/img/docs/user_docs/getting-started/kusion_apply_quickstart_0.12.gif) + +:::info +During the first apply, the models and modules that the application depends on will be downloaded, so it may take some time (usually within one minute). You can take a break and have a cup of coffee. +::: + +:::info +Kusion by default will create the Kubernetes resources of the application in the namespace the same as the project name. If you want to customize the namespace, please refer to [Project Namespace Extension](../3-concepts/1-project/2-configuration.md#kubernetesnamespace) and [Stack Namespace Extension](../3-concepts/2-stack/2-configuration.md#kubernetesnamespace). +::: + +Now we can visit [http://localhost:8080](http://localhost:8080) in our browser and play with the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_page.png) + +## Add MySQL Accessory + +As you can see, the demo application page indicates that the MySQL database is not ready yet. Hence, we will now add a MySQL database as an accessory for the workload. + +We can add the Kusion-provided built-in dependency in the `default/kcl.mod`, so that we can use the `MySQL` module in the configuration codes. + +```shell +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +``` + +We can update the `default/main.k` with the following configuration codes: + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +import mysql + +# main.k declares the customized configuration codes for default stack. +quickstart: ac.AppConfiguration { + workload: service.Service { + containers: { + quickstart: c.Container { + image: "kusionstack/kusion-quickstart:latest" + env: { + "DB_HOST": "$(KUSION_DB_HOST_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_USERNAME": "$(KUSION_DB_USERNAME_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + "DB_PASSWORD": "$(KUSION_DB_PASSWORD_QUICKSTART_DEFAULT_QUICKSTART_MYSQL)" + } + } + } + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + } + ] + } + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +The configuration codes above declare a local `mysql.MySQL` with the engine version of `8.0` as an accessory for the application workload. The necessary Kubernetes resources for deploying and using the local MySQL database will be generated and users can get the `host`, `username` and `password` of the database through the [MySQL Credentials And Connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) of Kusion in application containers. + +:::info +For more information about the naming convention of Kusion built-in MySQL module, you can refer to [Module Naming Convention](../6-reference/2-modules/3-naming-conventions.md). 
+::: + +After that, we can re-apply the application, and we can set the `--watch=false` to skip watching the resources to be reconciled: + +```shell +kusion apply --port-forward 8080 --watch=false +``` + +![](/img/docs/user_docs/getting-started/kusion_re_apply_quickstart_0.12.gif) + +:::info +You may wait another minute to download the MySQL Module. +::: + +Let's visit [http://localhost:8080](http://localhost:8080) in our browser, and we can find that the application has successfully connected to the MySQL database. The connection information is also printed on the page. + +![](/img/docs/user_docs/getting-started/quickstart_page_with_mysql.png) + +Now please feel free to enjoy the demo application! + +![](/img/docs/user_docs/getting-started/quickstart_mysql_validation.gif) + +## Delete Application + +We can delete the quickstart demo workload and related accessory resources with the following cmd: + +```shell +kusion destroy --yes +``` + +![](/img/docs/user_docs/getting-started/kusion_destroy_quickstart.gif) diff --git a/docs_versioned_docs/version-v0.13/2-getting-started/_category_.json b/docs_versioned_docs/version-v0.13/2-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/2-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/docs_versioned_docs/version-v0.13/3-concepts/0-overview.md b/docs_versioned_docs/version-v0.13/3-concepts/0-overview.md new file mode 100644 index 00000000..44aa634e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/0-overview.md @@ -0,0 +1,21 @@ +--- +id: overview +--- + +# Overview + +In this article, we will provide an overview of the core concepts of Kusion from the perspective of the Kusion workflow. + +![kusion workflow](/img/docs/concept/kusion_workflow.png) + +The workflow of Kusion is illustrated in the diagram above, which consists of three steps. + +The first step is **Write**, where the platform engineers build the [Kusion Modules](./3-module/1-overview.md) and initialize a [Workspace](./4-workspace.md). And the application developers declare their operational intent in [AppConfiguration](./5-appconfiguration.md) under a specific [Project](./1-project/1-overview.md) and [Stack](./2-stack/1-overview.md) path. + +The second step is the **Build** process, which results in the creation of the **SSoT** (Single Source of Truth), also known as the [Spec](./6-spec.md) of the current operational task. If you need version management of the SSoT, we recommend you manage the `Spec` with a VCS (Version Control System) tool like **Git**. + +The third step is **Apply**, which makes the `Spec` effective. Kusion parses the operational intent based on the `Spec` produced in the previous step. Before applying the `Spec`, Kusion will execute the `Preview` command (you can also execute this command manually) which will use a three-way diff algorithm to preview changes and prompt users to make sure all changes meet their expectations. And the `Apply` command will then actualize the operation intent onto various infrastructure platforms, currently supporting **Kubernetes**, **Terraform**, and **On-Prem** infrastructures. A [Release](./9-release.md) file will be created in the [Storage Backend](./7-backend.md) to record an operation. The `Destroy` command will delete the resources recorded in the `Release` file of a project in a specific workspace. + +A more detailed demonstration of the Kusion engine can be seen below. 
+
+![kusion engine](/img/docs/concept/kusion_engine.png)
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/1-project/1-overview.md b/docs_versioned_docs/version-v0.13/3-concepts/1-project/1-overview.md
new file mode 100644
index 00000000..edcc84d7
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/1-project/1-overview.md
@@ -0,0 +1,12 @@
+---
+sidebar_label: Overview
+id: overview
+---
+
+# Overview
+
+A project in Kusion is defined as a folder that contains a `project.yaml` file and is generally recommended to be linked to a Git repository. Typically, the mapping between a project and a repository is 1:1; however, it is possible to have multiple projects connected to a single repository — for example, in the case of a monorepo. A project consists of one or more applications.
+
+The purpose of the project is to bundle application configurations that are relevant. Specifically, it organizes logical configurations for internal components to orchestrate the application and assembles these configurations to suit different roles, such as developers and SREs, thereby covering the entire lifecycle of application development.
+
+From the perspective of the application development lifecycle, the configurations delineated by the project are decoupled from the application code. It takes an immutable image as input, allowing users to perform operations and maintain the application within an independent configuration codebase.
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/1-project/2-configuration.md b/docs_versioned_docs/version-v0.13/3-concepts/1-project/2-configuration.md
new file mode 100644
index 00000000..b5823df8
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/1-project/2-configuration.md
@@ -0,0 +1,38 @@
+---
+id: configuration
+sidebar_label: Project file reference
+---
+
+# Kusion project file reference
+
+Every Kusion project has a project file, `project.yaml`, which specifies metadata about your project, such as the project name and project description. The project file must begin with lowercase `project` and have an extension of either `.yaml` or `.yml`.
+
+## Attributes
+
+| Name | Required | Description | Options |
+|:------------- |:--------------- |:------------- |:------------- |
+| `name` | required | Name of the project, containing alphanumeric characters, hyphens and underscores. | None |
+| `description` | optional | A brief description of the project. | None |
+| `extensions` | optional | List of extensions on the project. | [See below](#extensions) |
+
+### Extensions
+
+Extensions allow you to customize how resources are generated or customized as part of a release.
+
+#### kubernetesNamespace
+
+The Kubernetes namespace extension allows you to customize the namespace in which your application's Kubernetes resources are generated.
+
+| Key | Required | Description | Example |
+|:------|:--------:|:-------------|:---------|
+| kind | y | The kind of extension being used. Must be 'kubernetesNamespace'. | `kubernetesNamespace` |
+| namespace | y | The namespace in which all application-scoped Kubernetes objects are generated. | `default` |
+
+```yaml
+# Example `project.yaml` file with a customized namespace of `test`.
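+# Without this extension, the resources would be created in a namespace named after the project ("example").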
+name: example
+extensions:
+  - kind: kubernetesNamespace
+    kubernetesNamespace:
+      namespace: test
+```
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/1-project/_category_.json b/docs_versioned_docs/version-v0.13/3-concepts/1-project/_category_.json
new file mode 100644
index 00000000..b62ac774
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/1-project/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Projects"
+}
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/2-stack/1-overview.md b/docs_versioned_docs/version-v0.13/3-concepts/2-stack/1-overview.md
new file mode 100644
index 00000000..c6dcd2b5
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/2-stack/1-overview.md
@@ -0,0 +1,16 @@
+---
+sidebar_label: Overview
+id: overview
+---
+
+# Overview
+
+A stack in Kusion is defined as a folder within the project directory that contains a `stack.yaml` file. Stacks provide a mechanism to isolate multiple sets of different configurations in the same project. A stack is also the smallest unit of operation that can be configured and deployed independently.
+
+The most common way to leverage stacks is to denote different phases of the software development lifecycle, such as `development`, `staging`, `production`, etc. For instance, where the image and resource requirements for an application workload differ across phases of the SDLC, they can be represented by different stacks in the same project, namely `dev`, `stage` and `prod`.
+
+In contrast to the deploy-time concept of a "target environment" - which Kusion defines as `workspaces` - a **stack** is a development-time concept for application developers to manage different configurations. One way to illustrate the difference is that you can easily deploy the `prod` stack to multiple target environments, for example, `aws-prod-us-east`, `aws-prod-us-east-2` and `azure-prod-westus`.
+
+## High Level Schema
+
+![High_Level_Schema](/img/docs/user_docs/concepts/high-level-schema.png)
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/2-stack/2-configuration.md b/docs_versioned_docs/version-v0.13/3-concepts/2-stack/2-configuration.md
new file mode 100644
index 00000000..b09a5c43
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/2-stack/2-configuration.md
@@ -0,0 +1,38 @@
+---
+id: configuration
+sidebar_label: Stack file reference
+---
+
+# Kusion stack file reference
+
+Every stack in a Kusion project has a stack file, `stack.yaml`, which specifies metadata about your stack, such as the stack name and stack description. The stack file must begin with lowercase `stack` and have an extension of either `.yaml` or `.yml`.
+
+## Attributes
+
+| Name | Required | Description | Options |
+|:------------- |:--------------- |:------------- |:------------- |
+| `name` | required | Name of the stack, containing alphanumeric characters, hyphens and underscores. | None |
+| `description` | optional | A brief description of the stack. | None |
+| `extensions` | optional | List of extensions on the stack. | [See below](#extensions) |
+
+### Extensions
+
+Extensions allow you to customize how resources are generated or customized as part of a release.
+
+#### kubernetesNamespace
+
+The Kubernetes namespace extension allows you to customize the namespace in which your application's Kubernetes resources are generated.
+ +| Key | Required | Description | Example | +|:------|:--------:|:-------------|:---------| +| kind | y | The kind of extension being used. Must be 'kubernetesNamespace' | `kubernetesNamespace` | +| namespace | y | The namespace where all application-scoped resources generate Kubernetes objects. | `default` | + +```yaml +# Example `stack.yaml` file with customized namespace of `test`. +name: dev +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` diff --git a/docs_versioned_docs/version-v0.13/3-concepts/2-stack/_category_.json b/docs_versioned_docs/version-v0.13/3-concepts/2-stack/_category_.json new file mode 100644 index 00000000..914c863f --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/2-stack/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Stacks" +} diff --git a/docs_versioned_docs/version-v0.13/3-concepts/3-module/1-overview.md b/docs_versioned_docs/version-v0.13/3-concepts/3-module/1-overview.md new file mode 100644 index 00000000..b6487117 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/3-module/1-overview.md @@ -0,0 +1,16 @@ +# Overview + +A Kusion module is a reusable building block designed by platform engineers to standardize application deployments and enable app developers to self-service. It consists of two parts: + +- App developer-oriented schema: It is a [KCL schema](https://kcl-lang.io/docs/user_docs/guides/schema-definition/). Fields in this schema are recommended to be understandable to application developers and workspace-agnostic. For example, a database Kusion module schema only contains fields like database engine type and database version. +- Kusion module generator: It is a piece of logic that generates the Intent with an instantiated schema mentioned above, along with platform configurations ([workspace](../workspace)). As a building block, Kusion module hides the complexity of infrastructures. A database Kusion module not only represents a cloud RDS, but it also contains logic to configure other resources such as security groups and IAM policies. Additionally, it seamlessly injects the database host address, username, and password into the workload's environment variables. The generator logic can be very complex in some situations so we recommend implementing it in a GPL like [go](https://go.dev/). + +Here are some explanations of the Kusion Module: + +1. It represents an independent unit that provides a specific capability to the application with clear business semantics. +2. It consists of one or multiple infrastructure resources (K8s/Terraform resources), but it is not merely a collection of unrelated resources. For instance, a database, monitoring capabilities, and network access are typical Kusion Modules. +3. Modules should not have dependencies or be nested within each other. +4. AppConfig is not a Module. +5. Kusion Module is a superset of [KPM](https://www.kcl-lang.io/docs/user_docs/guides/package-management/quick-start). It leverages the KPM to manage KCL schemas in the Kusion module. 
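+
+As an illustration of the developer-oriented schema described above, a database module's schema can be as small as a couple of fields, while everything environment-specific stays in the workspace configuration. The snippet below is a purely hypothetical sketch, not an actual module shipped by KusionStack:
+
+```python
+# A hypothetical dev-oriented schema: only the fields an app developer cares about.
+schema Database:
+    # Database engine type, e.g. "mysql".
+    engine: str
+
+    # Database engine version, e.g. "8.0".
+    version: str
+```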
+ +![kusion-module](/img/docs/concept/kusion-module.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/3-concepts/3-module/2-develop-guide.md b/docs_versioned_docs/version-v0.13/3-concepts/3-module/2-develop-guide.md new file mode 100644 index 00000000..e4a076e9 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/3-module/2-develop-guide.md @@ -0,0 +1,256 @@ +# Platform Engineer Develop Guide + +## Prerequisites + +To follow this guide, you will need: + +- Go 1.22 or higher installed and configured +- Kusion v0.12 or higher installed locally + +## Workflow + +As a platform engineer, the workflow of developing a Kusion module looks like this: + +1. Communicate with app developers and identify the fields that should exposed to them in the dev-orient schema +2. Identify module input parameters that should be configured by platform engineers in the [workspace](../workspace) +3. Define the app dev-orient schema +4. Develop the module by implementing gRPC interfaces + +The first two steps primarily involve communication with the application development team, and the specific details are not included in this tutorial. This tutorial begins with the subsequent two steps. + +## Set up a developing environment + +Developing a Kusion module includes defining a KCL schema and developing a module binary in golang. We will provide a scaffold repository and a new command `kusion mod init` to help developers set up the developing environment easily. + +After executing the command + +```shell +kusion mod init +``` + +Kusion will download a [scaffold repository](https://github.com/KusionStack/kusion-module-scaffolding) and rename this project with your module name. The scaffold contains code templates and all files needed for developing a Kusion module. + +## Developing + +The scaffold repository directory structure is shown below: + +```shell +$ tree kawesome/ +. +├── example +│   ├── dev +│   │   ├── example_workspace.yaml +│   │   ├── kcl.mod +│   │   ├── main.k +│   │   └── stack.yaml +│   └── project.yaml +├── kawesome.k +├── kcl.mod +└── src + ├── Makefile + ├── go.mod + ├── go.sum + ├── kawesome_generator.go + └── kawesome_generator_test.go +``` + +When developing a Kusion module with the scaffold repository, you could follow the steps below: + +1. **Define the module name and version** + - For go files. Rename the module name in the `go.mod` and related files to your Kusion module name. + ```yaml + module kawsome + go 1.22 + require ( + ... + ) + ``` + - For KCL files. Rename package name and version in the `kcl.mod` + ```toml + [package] + name = "kawesome" + version = 0.2.0 + ``` + + We assume the module named is `kawesome` and the version is `0.2.0` in this guide. + +2. **Define the dev-orient schemas**. They would be initialized by app developers. In this scaffold repository, we've defined a schema named Kawesome in `kawesome.k` that consists of two resources `Service` and `RandomPassword` and they will be generated into a Kubernetes Service and a Terraform RandomPassword later. + +```python +schema Kawesome: +""" Kawesome is a sample module schema consisting of Service +and RandomPassword + +Attributes +---------- +service: Service, default is Undefined, required. + The exposed port of Workload, which will be generated into Kubernetes Service. +randomPassword: RandomPassword, default is Undefined, required. + The sensitive random string, which will be generated into Terraform random_password. + +Examples +-------- +import kawesome as ks + +... ... 
+ +accessories: { + "kawesome": kawesome.Kawesome { + service: kawesome.Service{ + port: 5678 + } + randomPassword: kawesome.RandomPassword { + length: 20 + } + } +} +""" + +# The exposed port of Workload, which will be generated into Kubernetes Service. +service: Service + +# The sensitive random string, which will be generated into Terraform random_password. +randomPassword: RandomPassword +``` + +3. **Implement the [gRPC proto](https://github.com/KusionStack/kusion/blob/main/pkg/modules/proto/module.proto) generate interface.** The `generate` interface consumes the application developer's config described in the [`AppConfiguration`](../app-configuration) and the platform engineer's config described in the [`workspace`](../workspace) to generate all infrastructure resources represented by this module. + +```go +func (k *Kawesome) Generate(_ context.Context, request *module.GeneratorRequest) (*module.GeneratorResponse, error) { + // generate your infrastructure resoruces +} + +// Patcher primarily contains patches for fields associated with Workloads, and additionally offers the capability to patch other resources. +type Patcher struct { + // Environments represent the environment variables patched to all containers in the workload. + Environments []v1.EnvVar `json:"environments,omitempty" yaml:"environments,omitempty"` + // Labels represent the labels patched to the workload. + Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` + // PodLabels represent the labels patched to the pods. + PodLabels map[string]string `json:"podLabels,omitempty" yaml:"podLabels,omitempty"` + // Annotations represent the annotations patched to the workload. + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + // PodAnnotations represent the annotations patched to the pods. + PodAnnotations map[string]string `json:"podAnnotations,omitempty" yaml:"podAnnotations,omitempty"` + // JSONPatchers represents patchers that can be patched to an arbitrary resource. + // The key of this map represents the ResourceId of the resource to be patched. + JSONPatchers map[string]JSONPatcher `json:"jsonPatcher,omitempty" yaml:"jsonPatcher,omitempty"` +} +``` + +The `GeneratorRequest` contains the application developer's config, platform engineer's config, workload config and related metadata a module could need to generate infrastructure resources. +In the `GeneratorResponse`, there are two fields, `Resources` and `Patchers`. The `Resource` represents resources that should operated by Kusion and they will be appended into the [Spec](../spec). The `Patchers` are used to patch the workload and other resources. + +### Workload + +Workload in the AppConfiguration is also a Kusion module. If the workload module only generates one resource, this resource will be regarded as the workload resource. However, if the workload module generates more than one resource, one and only one of them must contain a key-value pair in the 'extension' field, where the key is 'kusion.io/is-workload' and the value is 'true' and this resource will be regarded as the workload resource. + +### Implicit Resource Dependency + +When you need to use an attribute of another resource as the value of a specific resource attribute, Kusion supports declaring the implicit resource dependencies with the `$kusion_path` prefix. 
You can concatenate the implicit resource dependency path with the resource `id`, attribute `name` and the `$kusion_path` prefix, for example: + +```yaml +# Dependency path as an attribute value. +spec: + resources: + - id: v1:Service:test-ns:test-service + type: Kubernetes + attributes: + metadata: + annotations: + deployment-name: $kusion_path.v1:Deployment:test-ns:test-deployment.metadata.name +``` + +In addition, please note that: + +- The implicit resource dependency path can only be used to replace the value in `Attributes` field of the `Resource`, but not the key. For example, the following `Spec` is **invalid**: + +```yaml +# Dependency path not in `attributes`. +spec: + resources: + - id: v1:Service:test:$kusion_path.apps/v1:Deployment:test-ns:test-deployment.metadata.name +``` + +```yaml +# Dependency path in the key, but not in the value. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + $kusion_path.v1:Service:test-ns:test-service.metadata.name: test-svc +``` + +- The implicit resource dependency path can only be used as a standalone value and cannot be combined with other string. For example, the following `Spec` is **invalid**: + +```yaml +# Dependency path combined with other string. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + test-svc: $kusion_path.v1:Service:test-ns:test-service.metadata.name + "-test" +``` + +- The impliciy resource dependency path does not support accessing the value in an array, so the following is currently **invalid**: + +```yaml +# Dependency path accessing the value in an array. +spec: + resources: + - id: apps/v1:Deployment:test-ns:test-deployment + type: Kubernetes + attributes: + metadata: + annotations: + test-svc: $kusion_path.v1:Service:test-ns:test-service.spec.ports[0].name +``` + +## Publish + +Publish the Kusion module to an OCI registry with the command `kusion mod push`. If your module is open to the public, we **welcome and highly encourage** you to contribute it to the module registry [catalog](https://github.com/KusionStack/catalog), so that more people can benefit from the module. Submit a pull request to this repository, once it is merged, it will be published to the [KusionStack GitHub container registry](https://github.com/orgs/KusionStack/packages). + +Publish a stable version +```shell +kusion mod push /path/to/my-module oci:/// --creds +``` + +Publish a module of a specific OS arch +```shell +kusion mod push /path/to/my-module oci:/// --os-arch==darwin/arm64 --creds +``` + +Publish a pre-release version +```shell +kusion mod push /path/to/my-module oci:/// --latest=false --creds +``` + +:::info +The OCI URL format is `oci:///` and please ensure that your token has permissions to write to the registry. +::: + +More details can be found in the `kusion mod push` reference doc. + +## Register to the workspace + +```yaml +modules: + kawesome: + path: oci://ghcr.io/kusionstack/kawesome + version: 0.2.0 + configs: + default: + service: + labels: + kusionstack.io/module-name: kawesome + annotations: + kusionstack.io/module-version: 0.2.0 +``` + +Register module platform configuration in the `workspace.yaml` to standardize the module's behavior. App developers can list all available modules registered in the workspace. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/3-concepts/3-module/3-app-dev-guide.md b/docs_versioned_docs/version-v0.13/3-concepts/3-module/3-app-dev-guide.md new file mode 100644 index 00000000..3169c67c --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/3-module/3-app-dev-guide.md @@ -0,0 +1,127 @@ +# Application Developer User Guide + +## Prerequisites + +To follow this guide, you will need: + +- Kusion v0.12 or higher installed locally + +## Workflow + +As an application developer, the workflow of using a Kusion module looks like this: + +1. Browse available modules registered by platform engineers in the workspace +2. Add modules you need to your Stack +3. Initialize modules +4. Apply the AppConfiguration + +## Browse available modules + +For all KusionStack built-in modules, you can find all available modules and documents in the [reference](../../6-reference/2-modules/index.md) + +Since the platform engineers have already registered the available modules in the workspace, app developers can execute `kusion mod list` to list the available modules. + +```shell +kusion mod list --workspace dev + +Name Version URL +kawesome 0.2.0 oci://ghcr.io/kusionstack/kawesome +``` + +## Add modules to your Stack + +Taking `kawesome` as an example, the directory structure is shown below: + +```shell +example +├── dev +│   ├── example_workspace.yaml +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +Select the module you need from the result of `kusion mod list` and execute `kusion mod add kawesome` to add `kawesome` into your Stack. + +Once you have added the `kawesome` module, the `kcl.mod` file will be updated to look like this. + +``` toml +[package] +name = "example" + +[dependencies] +kawesome = { oci = "oci://ghcr.io/kusionstack/kawesome", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +- The `kam` dependency represents the [Kusion Application Module](https://github.com/KusionStack/kam.git) which contains the AppConfiguration. +- The `service` dependency represents the service workload module. +- The `kawesome` is the Kusion module we are going to use in the AppConfiguration. + +## Initialize modules + +```python +# The configuration codes in perspective of developers. +import kam.v1.app_configuration as ac +import service +import service.container as c +import kawesome.v1.kawesome + +kawesome: ac.AppConfiguration { + # Declare the workload configurations. + workload: service.Service { + containers: { + kawesome: c.Container { + image: "hashicorp/http-echo" + env: { + "ECHO_TEXT": "$(KUSION_KAWESOME_RANDOM_PASSWORD)" + } + } + } + replicas: 1 + } + # Declare the kawesome module configurations. + accessories: { + "kawesome": kawesome.Kawesome { + service: kawesome.Service{ + port: 5678 + } + randomPassword: kawesome.RandomPassword { + length: 20 + } + } + } +} +``` + +Initialize the `kawesome` module in the `accessories` block of the AppConfiguration. The key of the `accessories` item represents the module name and the value represents the actual module you required. + +## Apply the result + +Execute the preview command to validate the result. + +```shell +kusion apply + ✔︎ Generating Spec in the Stack dev... 
+
+Stack: dev
+ID                                                      Action
+hashicorp:random:random_password:example-dev-kawesome   Create
+v1:Namespace:example                                    Create
+v1:Service:example:example-dev-kawesome                 Create
+apps/v1:Deployment:example:example-dev-kawesome         Create
+
+
+Do you want to apply these diffs?:
+  > details
+Which diff detail do you want to see?:
+> all
+  hashicorp:random:random_password:example-dev-kawesome Create
+  v1:Namespace:example Create
+  v1:Service:example:example-dev-kawesome Create
+  apps/v1:Deployment:example:example-dev-kawesome Create
+```
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/3-module/_category_.json b/docs_versioned_docs/version-v0.13/3-concepts/3-module/_category_.json
new file mode 100644
index 00000000..5952a21e
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/3-module/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Modules"
+}
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/4-workspace.md b/docs_versioned_docs/version-v0.13/3-concepts/4-workspace.md
new file mode 100644
index 00000000..daedd84f
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/4-workspace.md
@@ -0,0 +1,222 @@
+---
+id: workspace
+sidebar_label: Workspace
+---
+
+# Workspace
+
+Workspace is a logical concept that maps to an actual target environment to deploy a stack to. In today's context, this _usually_ represents a Kubernetes cluster for the application workload and an optional cloud account to provision infrastructure resources that the workload depends on (a database, for example). Aside from the deployment destination, workspaces are also designed to be associated with a series of platform configurations that are used by all the stacks deployed to said workspace.
+
+When executing the command `kusion generate`, Kusion will "match" the AppConfiguration and the appropriate workspace configuration to dynamically generate the `Spec`, which contains the complete manifest describing the resources in the stack. The relationship of the Project, Stack and Workspace is shown below. Notice that all three ways to organize stacks are valid.
+
+![project-stack-workspace](/img/docs/concept/project-stack-workspace.png)
+
+Workspace is designed to address separation of concerns. As opposed to the development-time concept of a "stack", a workspace is a deploy-time concept that represents a deployment target, a.k.a. an actual runtime environment. Workspaces should be entirely managed by Platform Engineers to alleviate the burden on developers to understand environment-specific details.
+
+To dig a little deeper, a workspace represents the need for a distinct set of "platform opinions". That includes things that application developers either don't want to or shouldn't need to worry about, such as which Kubernetes cluster to deploy to, the credentials to deploy to said clusters, and other infrastructure details like what database instance to provision.
+
+Workspaces are intended to be flexible so you can map them as you see fit to the boundaries that are relevant to your use case. For example, you can map a workspace to a cloud region (aws-us-east-1), provided that regional isolation is sufficient for you (this is an extreme case). Alternatively, a workspace can be mapped to the combination of a cloud region and an SDLC phase (aws-dev-us-east-1), provided that it represents the right boundary from a platform perspective.
+
+The workspace configuration is in a deterministic format and currently written in YAML.
The subcommands of `kusion workspace` are provided to manage the workspaces. When using `kusion workspace`, the workspace configuration will be saved as YAML file in local file system. To avoid the possible risks, the environment variables are provided to hold the sensitive data such as Access Keys and Secret keys. + +## Workspace Configuration + +The configuration of a Workspace is stored in a single YAML file, which consists of `modules`, `secretStore`, and `context`. An example of Workspace configuration is shown as below. + +```yaml +# The platform configuration for Modules or KAMs. +# For each Module or KAM, the configuration format is as below. +# # ${module_identifier} or ${KAM_name}: +# # path: oci://ghcr.io/kusionstack/module-name # url of the module artifact +# # version: 0.2.0 # version of the module +# # configs: +# # default: # default configuration, applied to all projects +# # ${field1}: ${value1} +# # #{field2}: ${value2} +# # ... +# # ${patcher_name}: #patcher configuration, applied to the projects assigned in projectSelector +# # ${field1}: ${value1_override} +# # ... +# # projectSelector: +# # - ${project1_name} +# # - ${project2_name} +# # ... +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: ${mysql_subnet_id} + databaseName: kusion + largeSize: + size: 50 + projectSelector: + - foo + - bar + importDBInstance: + importedResources: + "aliyun:alicloud:alicloud_db_instance:wordpress-demo": "your-imported-resource-id" + projectSelector: + - baz + +secretStore: + provider: + aws: + region: us-east-1 + profile: The optional profile to be used to interact with AWS Secrets Manager. + +context: + KUBECONFIG_PATH: $HOME/.kube/config + AWS_ACCESS_KEY_ID: ref://secrets-manager-name/key-for-ak + AWS_SECRET_ACCESS_KEY: ref://secrets-manager-name/key-for-sk +``` + +### modules + +The `modules` are the platform-part configurations of Modules and KAMs, where the identifier of them are `${namespace}/${module_name}@${module_tag}` and `${kam_name}`. For each Module or KAM configuration, it is composed of a `default` and several `patcher` blocks. The `default` block contains the universal configuration of the Workspace, and can be applied to all Stacks in the Workspace, which is composed of the values of the Module's or KAM's fields. The `patcher` block contains the exclusive configuration for certain Stacks, which includes not only the fields' values, but also the applied Projects. + +The `patcher` block is designed to increase the flexibility for platform engineers managing Workspaces. Cause the Workspace should map to the real physical environment, in the actual production practice, it's almost impossible that all the Stacks share the same platform configuration, although we want them the same. + +The values of the same fields in `patcher` will override the `default`, and one field in multiple patchers is forbidden to assign to the same Project. That is, if there are more than one `patcher` declaring the same field with different values, the applied Projects are prohibited to overlap. And, The name of `patcher` must not be `default`. + +In the `patcher`, the applied Projects are assigned by the field `ProjectSelector`, which is an array of the Project names. The `ProjectSelector` is provided rather than something may like `StackSelector`, which specifies the applied Stacks. Here are the reasons. 
From the perspective of using a Workspace, the mapping between a Workspace and a Stack is specified by the user running the Kusion operation commands. From the perspective of the relationship among Project, Stack and Workspace, the Workspace is designed for the reuse of platform-level configuration across multiple Projects. When a Project "encounters" a Workspace, it becomes a "Stack instance", which can be applied to a series of real resources. If something like a `StackSelector` were used, this reuse could not be achieved, and the Workspace would lose its relevance. For more information about the relationship, please refer to [Project](project/overview) and [Stack](stack/overview).
+
+Different Modules and KAMs have different names, fields, and corresponding formats and restrictions. When writing the configuration, check the corresponding Module's or KAM's description, and make sure all the requisite Modules and KAMs have been correctly configured. Please refer to [Kusion Module](module/overview) for more information. The example above gives a sample of the Module `mysql`.
+
+The `importedResources` block is designed to declare the import of existing cloud resources. The `importedResources` is a `map` where you can declare the mapping from the `id` of the resource in the Kusion `Spec` to the Terraform ID of the resource to be imported. Kusion will automatically synchronize the state of the existing cloud resource for the Kusion resource.
+
+### secretStore
+
+The `secretStore` field can be used to access the sensitive data stored in a cloud-based secrets manager. More details can be found [here](../5-user-guides/4-secrets-management/1-using-cloud-secrets.md).
+
+### context
+
+The `context` field can be used to declare information such as the Kubernetes `kubeconfig` path or content, and the AK/SK of the Terraform providers. The configurable attributes are shown below.
+
+- `KUBECONFIG_PATH`: the local path of the `kubeConfig` file
+- `KUBECONFIG_CONTENT`: the content of the `kubeConfig` file, can be used with cloud-based secrets management (e.g. `ref://secrets-management-name/secrets-key-for-kubeconfig`)
+- `AWS_ACCESS_KEY_ID`: the access key ID of the AWS provider
+- `AWS_SECRET_ACCESS_KEY`: the secret key of the AWS provider
+- `ALICLOUD_ACCESS_KEY`: the access key ID of the Alicloud provider
+- `ALICLOUD_SECRET_KEY`: the secret key of the Alicloud provider
+
+## Managing Workspace
+
+The subcommands of `kusion workspace` are used to manage Workspaces, including `create`, `show`, `list`, `switch`, `update` and `delete`. Because the Workspace configurations are stored persistently, the current or a specified Backend will be used. For more information about Backends, please refer to [Backend](backend).
+
+Kusion will create a `default` Workspace with an empty configuration in every Backend automatically, and set it as the current one. When first using Kusion, or when no Workspace has been configured, the `default` Workspace will be used.
+
+### Creating Workspace
+
+Use `kusion workspace create ${name} -f ${configuration_file_path}` to create a new Workspace with the configuration in a YAML file. The Workspace is identified by the `name`, which must be new, and the configuration must be written in a correctly formatted YAML file.
+
+The command above creates the Workspace in the current Backend. To create a Workspace in another Backend, use the `--backend` flag to specify it. Workspace names within a Backend must be unique, but the same name may be used in different Backends.
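+
+For reference, a minimal sketch of such a configuration file (here a hypothetical `dev.yaml`) only needs the sections it actually uses; the values below simply reuse the `mysql` module and `context` entries from the example above:
+
+```yaml
+modules:
+  mysql:
+    path: oci://ghcr.io/kusionstack/mysql
+    version: 0.2.0
+    configs:
+      default:
+        cloud: alicloud
+        size: 20
+        databaseName: kusion
+context:
+  KUBECONFIG_PATH: $HOME/.kube/config
+```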
+
+In some scenarios, a newly created Workspace is expected to become the current one. For convenience, the `--current` flag is provided to set the Workspace as current alongside its creation.
+
+Note that creating a `default` Workspace is not allowed, because it is created by Kusion automatically.
+
+An example is shown below.
+
+```shell
+# create a workspace in the current backend
+kusion workspace create dev -f dev.yaml
+
+# create a workspace in the current backend and set it as current
+kusion workspace create dev -f dev.yaml --current
+
+# create a workspace in a specified backend
+kusion workspace create dev -f dev.yaml --backend oss-pre
+```
+
+Which Workspaces to create is decided by the platform engineers. We recommend organizing them by the following rules:
+
+- **SDLC phases**, such as `dev`, `pre`, `prod`;
+- **cloud vendors**, such as `aws`, `alicloud`;
+- a combination of the two above, such as `dev-aws`, `prod-alicloud`.
+
+By design, Kusion does not support deploying a Stack to multiple clouds or regions within a single Workspace. While users can technically define a Module that provisions resources across multiple clouds or regions, Kusion does not recommend this practice, and will not provide technical support for such configurations. If the platform engineers need to manage resources across multiple clouds or regions, they should create separate Workspaces.
+
+### Listing Workspace
+
+Use `kusion workspace list` to get all the workspace names.
+
+An example is shown below. For simplicity, the following examples do not specify a Backend, which is supported via the `--backend` flag.
+
+```shell
+# list all the workspace names
+kusion workspace list
+```
+
+### Switching Workspace
+
+To avoid specifying the Workspace name for each Kusion operation command, `kusion workspace switch ${name}` is provided to switch the current Workspace. Then, when executing `kusion generate`, the current Workspace will be used. The Workspace to switch to must already exist.
+
+An example is shown below.
+
+```shell
+# switch workspace
+kusion workspace switch dev
+```
+
+### Showing Workspace
+
+Use `kusion workspace show ${name}` to get the Workspace configuration. If the `name` is not specified, the configuration of the current Workspace will be returned.
+
+An example is shown below.
+
+```shell
+# show a specified workspace configuration
+kusion workspace show dev
+
+# show the current workspace configuration
+kusion workspace show
+```
+
+### Updating Workspace
+
+When a Workspace needs to be updated, use `kusion workspace update ${name} -f ${configuration_file_path}` with the new configuration file. The whole updated configuration must be provided, and the Workspace must already exist. The recommended steps are to get the Workspace configuration first, then revise the configuration and execute the command. If the `name` is not specified, the current Workspace will be used.
+
+Updating the `default` Workspace is allowed, and the `--current` flag is also supported to set it as the current one.
+
+An example is shown below.
+
+```shell
+# update a specified workspace
+kusion workspace update dev -f dev_new.yaml
+
+# update a specified workspace and set it as current
+kusion workspace update dev -f dev_new.yaml --current
+
+# update the current workspace
+kusion workspace update -f dev_new.yaml
+```
+
+### Deleting Workspace
+
+When a Workspace is no longer in use, use `kusion workspace delete ${name}` to delete a Workspace.
If the `name` is not specified, the current Workspace will get deleted, and the `default` Workspace will be set as the current Workspace. Therefore, deleting the `default` Workspace is not allowed. + +The example is shown as below. + +```shell +# delete a specified workspace +kusion workspace delete dev + +# delete the current workspace +kusion workspace delete +``` + +## Using Workspace + +Workspace is used in the command `kusion generate`, the following steps help smooth the operation process. + +1. Write the Workspace configuration file with the format shown above, and fulfill all the necessary fields; +2. Create the workspace with `kusion workspace create`, then Kusion perceives the Workspace. The flag `--current` can be used to set it as the current. +3. Execute `kusion generate` in a Stack to generate the whole Spec, the AppConfiguration and Workspace configuration get rendered automatically, and can be applied to the real infrastructure. If the appointed Workspace or Backend is asked, the flags `--workspace` and `--backend` will help achieve that. +4. If the Workspace needs to update, delete, switch, etc. Use the above commands to achieve that. diff --git a/docs_versioned_docs/version-v0.13/3-concepts/5-appconfiguration.md b/docs_versioned_docs/version-v0.13/3-concepts/5-appconfiguration.md new file mode 100644 index 00000000..570d1ac0 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/5-appconfiguration.md @@ -0,0 +1,38 @@ +--- +id: app-configuration +sidebar_label: AppConfiguration +--- + +# AppConfiguration + +As a modern cloud-native application delivery toolchain, declarative intent-based actuation is the central idea of Kusion, and `AppConfiguration` model plays the role of describing the intent, which provides a simpler path for on-boarding developers to the platform without leaking low-level details in runtime infrastructure and allows developers to fully focus on the application logic itself. + +The `AppConfiguration` model consolidates workload and their dependent accessories for the application deployment, along with any pipeline and operational requirements into one standardized, infrastructure-independent declarative specification. This declarative specification represents the intuitive user intent for the application, which drives a standardized and efficient application delivery and operation process in a hybrid environment. + +![appconfig.png](/img/docs/concept/appconfig.png) + +AppConfiguration consists of four core concepts, namely `Workload`, `Accessory`, `Pipeline`, and `Dependency`. Each of them represents a [Kusion module](./3-module/1-overview.md). We will walk through these concepts one by one. + +#### Workload + +Workload is a representation of the business logic that runs in the cluster. Common workload types include long-running services that should “never” go down and batch jobs that take from a few seconds to a few days to complete. + +In most cases, a Workload is a backend service or the frontend of an Application. For example, in a micro-service architecture, each service would be represented by a distinct Workload. This allows developers to manage and deploy their code in a more organized and efficient manner. + +#### Accessory + +Using the analogy of a car, workload is the core engine of the application, but only having the engine isn’t enough for the application to function properly. In most cases, there must be other supporting parts for the workload to operate as intended. For those supporting parts, we call them Accessory. 
Accessory refers to various runtime capabilities and operational requirements provided by the underlying infrastructure, such as database, network load-balancer, storage and so on. + +From the perspective of team collaboration, the platform team should be responsible for creating and maintaining various accessory definitions, providing reusable building blocks out-of-the-box. Application developers just need to leverage the existing accessories to cover the evolving application needs. This helps software organizations achieve separation of concern so that different roles can focus on the subject matter they are an expert in. + +#### Pipeline + +Running reliable applications requires reliable delivery pipelines. By default, Kusion provides a relatively fixed built-in application delivery pipeline, which should be sufficient for most use cases. However, as the application scale and complexity grow, so does the need for a customizable delivery pipeline. Developers wish for more fine-tuned control and customization over the workflow to deliver their applications. That’s why we introduced the Pipeline section in AppConfiguration model. + +A customized delivery pipeline is made of several steps, each corresponding to an operation that needs to be executed, such as running certain tests after a deployment, scanning artifacts for vulnerabilities prior to deployment, and so on. Implementation-wise, the execution of each step should be carried out in the form of a plugin, developed and managed by the platform owners. + +#### Topologies + +Application dependencies refer to the external services or other software that an application relies on to function properly. These dependencies may be required to provide certain functionality or to use certain features in the application. + +Similar to declaring a dependency from an application to an accessory, AppConfiguration lets you declare the dependencies between different applications in the same way. diff --git a/docs_versioned_docs/version-v0.13/3-concepts/6-spec.md b/docs_versioned_docs/version-v0.13/3-concepts/6-spec.md new file mode 100644 index 00000000..0c3de9d4 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/6-spec.md @@ -0,0 +1,123 @@ +--- +id: spec +sidebar_label: Spec +--- + +# Spec + +The Spec represents the operational intentions that you aim to deliver using Kusion. These intentions are expected to contain all components throughout the DevOps lifecycle, including resources (workload, database, load balancer, etc.), dependencies, and policies. The Kusion module generators are responsible for converting all AppConfigurations and environment configurations into the Spec. Once the Spec is generated, the Kusion Engine takes charge of updating the actual infrastructures to match the Spec. + +## Purpose + +### Single Source of Truth + +In Kusion's workflow, the platform engineer builds Kusion modules and provides environment configurations, application developers choose Kusion modules they need and deploy operational intentions to an environment with related environment configurations. They can also input dynamic parameters like the container image when executing the `kusion generate` command. So the final operational intentions include configurations written by application developers, environment configurations and dynamic inputs. Due to this reason, we introduce **Spec** to represent the SSoT(Single Source of Truth) of Kusion. It is the result of `kusion generate` which contains all operational intentions from different sources. 
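+
+Because the Spec is an ordinary file, managing this SSoT with a VCS can be as simple as committing the generated result. A minimal sketch, assuming the Spec has been saved as `spec.yaml` in the stack directory (the file name is illustrative):
+
+```shell
+# Track the generated Spec so every operational change is reviewable and revertible.
+git add spec.yaml
+git commit -m "spec: quickstart dev release"
+```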
+ +### Consistency + +Delivering an application to different environments with identical configurations is a common practice, especially for applications that require scalable distribution. In such cases, an immutable configuration package is helpful. By utilizing the Spec, all configurations and changes are stored in a single file. As the Spec is the input of Kusion, it ensures consistency across different environments whenever you execute Kusion with the same Spec file. + +### Rollback and Disaster Recovery + +The ability to roll back is crucial in reducing incident duration. Rolling back the system to a previously validated version is much faster compared to attempting to fix it during an outage. We regard a validated Spec as a snapshot of the system and recommend storing the Spec in a version control system like Git. This enables better change management practices and makes it simpler to roll back to previous versions if needed. In case of a failure or outage, having a validated Spec simplifies the rollback process, ensuring that the system can be quickly recovered. + +## Example + +The API definition of the `Spec` structure in Kusion can be found [here](https://github.com/KusionStack/kusion/blob/main/pkg/apis/api.kusion.io/v1/types.go#L862). Below is an example `Spec` file generated from the `quickstart` demo application (more details can be found [here](../2-getting-started/2-deliver-quickstart.md)). + +```yaml +resources: + - id: v1:Namespace:quickstart + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: quickstart + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:quickstart:quickstart-default-quickstart + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + name: quickstart-default-quickstart + namespace: quickstart + spec: + selector: + matchLabels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + spec: + containers: + - image: kusionstack/kusion-quickstart:latest + name: quickstart + resources: {} + status: {} + dependsOn: + - v1:Namespace:quickstart + - v1:Service:quickstart:quickstart-default-quickstart-private + extensions: + GVK: apps/v1, Kind=Deployment + - id: v1:Service:quickstart:quickstart-default-quickstart-private + type: Kubernetes + attributes: + apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + name: quickstart-default-quickstart-private + namespace: quickstart + spec: + ports: + - name: quickstart-default-quickstart-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/name: quickstart + app.kubernetes.io/part-of: quickstart + type: ClusterIP + status: + loadBalancer: {} + dependsOn: + - v1:Namespace:quickstart + extensions: + GVK: /v1, Kind=Service +secretStore: null +context: {} +``` + +From the example above, we can see that the `Spec` contains a list of `resources` required by the application. + +A `resource` is a concept in `Kusion` that abstract infrastructure. 
It represents an individual unit of infrastructure or application component managed by the `Kusion`, serving as a fundamental building block for defining the desired state of the infrastructure. They provide a unified way to define various types of resources, including `Kubernetes` objects and `Terraform` resources. Each `resource` in the `Spec` needs to have `id`, `type`, `attributes`, `dependsOn`, and `extensions` fields: + +- `id` is the unique key of this resource. An idiomatic way for `Kubernetes` resources is `apiVersion:kind:namespace:name`, and for `Terraform` resources is `providerNamespace:providerName:resourceType:resourceName`. +- `type` represents the type of runtime Kusion supports, and currently includes `Kubernetes` and `Terraform`. +- `attributes` represents all specified attributes of this resource, basically the manifest and argument attributes for the `Kubernetes` and `Terraform` resources. +- `dependsOn` contains all the other resources the resource depends on. +- `extensions` specifies the arbitrary metadata of the resource, where you can declare information such as Kubernetes GVK, Terraform provider, and imported resource id, etc. + +Besides the `resources`, Spec also records the `secretStore` and `context` field in the corresponding workspace. The former can be used to access sensitive data stored in an external secrets manager, while the latter can be used to declare the workspace-level configurations such as Kubernetes `kubeconfig` file path or content, and Terraform providers' AK/SK. More information can be found [here](4-workspace.md#secretstore). + +## Apply with Spec File + +Kusion supports using the Spec file directly as input. Users can place the Spec file in the stack directory and execute `kusion preview --spec-file spec.yaml` and `kusion apply --spec-file spec.yaml` to preview and apply the resources. diff --git a/docs_versioned_docs/version-v0.13/3-concepts/7-backend.md b/docs_versioned_docs/version-v0.13/3-concepts/7-backend.md new file mode 100644 index 00000000..5262f7ac --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/7-backend.md @@ -0,0 +1,228 @@ +--- +id: backend +sidebar_label: Backends +--- + +# Backend + +Backend is Kusion's storage, which defines the place to store Workspace and Release. By default, Kusion uses the `local` type of backend to store on the local disk. While in the scenario of team collaboration, the Workspace and Release can be stored on a remote backend, such as `oss` and `s3`, to allow multiple users' access. + +The command `kusion config` is used to configure the backend configuration. Configuring a whole backend or an individual config item are both supported. For the sensitive data, the environment variables are supported, and with higher priority. + +Furthermore, Kusion provides the operation of setting current backend. Thus, the trouble of specifying backend can be saved when executing operation commands and managing `workspace`. + +## Available Backend Types + +There are three available backend types: `local`, `oss`, `s3`. + +### local + +The `local` type backend uses local file system as storage, which is suitable for local operations, but not ideal for multi-user collaboration. The supported config items are as below. + +- **path**: `type string`, `optional`, specify the directory to store the Workspace and Release files. The subdirectories `workspaces` and `releases` are used to store the corresponding files separately. 
It's recommended to use an empty or Kusion-exclusive directory as the local backend path. If not set, the default path `${KUSION_HOME}` is used.
+
+The whole `local` type backend configuration is shown below.
+
+```yaml
+{
+    "type": "local",
+    "configs": {
+        "path": "${local_path}" # type string, optional, the directory to store files.
+    }
+}
+```
+
+### oss
+
+The `oss` type backend uses the Alicloud Object Storage Service (OSS) as storage. The supported config items are as below.
+
+- **endpoint**: `type string`, `required`, specifies the access endpoint for the alicloud oss bucket.
+- **accessKeyID**: `type string`, `required`, specifies the alicloud account accessKeyID; it can also be declared by the environment variable `OSS_ACCESS_KEY_ID`.
+- **accessKeySecret**: `type string`, `required`, specifies the alicloud account accessKeySecret; it can also be declared by the environment variable `OSS_ACCESS_KEY_SECRET`.
+- **bucket**: `type string`, `required`, specifies the name of the alicloud oss bucket.
+- **prefix**: `type string`, `optional`, specifies the prefix under which the Workspace and Release files are stored, i.e. `${prefix}/workspaces` and `${prefix}/releases` respectively. Using a prefix can create a "dedicated space" for the Kusion data, which is beneficial for the management and reuse of the bucket. If not set, no prefix is used and the files are stored in the root path of the bucket (by analogy with a file system).
+
+Note that `accessKeyID` and `accessKeySecret` are required for the whole configuration, which is combined from the configuration managed by the `kusion config` command and the environment variables. For `kusion config` alone, they are not obligatory. For safety reasons, using environment variables is the recommended way.
+
+The whole `oss` type backend configuration is shown below.
+
+```yaml
+{
+    "type": "oss",
+    "configs": {
+        "endpoint": "${oss_endpoint}", # type string, required, the oss endpoint.
+        "accessKeyID": "${oss_access_key_id}", # type string, optional for the command "kusion config", the oss access key id.
+        "accessKeySecret": "${oss_access_key_secret}", # type string, optional for the command "kusion config", the oss access key secret.
+        "bucket": "${oss_bucket}", # type string, required, the oss bucket.
+        "prefix": "${oss_prefix}" # type string, optional, the prefix to store the files.
+    }
+}
+```
+
+The supported environment variables are as below.
+
+```bash
+export OSS_ACCESS_KEY_ID="${oss-access-key-id}" # configure accessKeyID
+export OSS_ACCESS_KEY_SECRET="${oss-access-key-secret}" # configure accessKeySecret
+```
+
+### s3
+
+The `s3` type backend uses the AWS Simple Storage Service (S3) as storage. The supported config items are as below.
+
+- **region**: `type string`, `required`, specifies the region of the aws s3 bucket; it can also be declared by the environment variable `AWS_DEFAULT_REGION` or `AWS_REGION`, where the latter has higher priority.
+- **endpoint**: `type string`, `optional`, specifies the access endpoint for the aws s3 bucket.
+- **accessKeyID**: `type string`, `required`, specifies the aws account accessKeyID; it can also be declared by the environment variable `AWS_ACCESS_KEY_ID`.
+- **accessKeySecret**: `type string`, `required`, specifies the aws account accessKeySecret; it can also be declared by the environment variable `AWS_SECRET_ACCESS_KEY`.
+- **bucket**: `type string`, `required`, specifies the name of the aws s3 bucket.
+- **prefix**: `type string`, `optional`, constitute the prefix to store the Workspace and Release files, whose prefixes are `${prefix}/workspaces` and `${prefix}/releases` respectively. + +Noted that `region`, `accessKeyID` and `accessKeySecret` are optional for the `kusion config` command. + +The whole s3 type backend configuration is as below. + +```yaml +{ + "type": "s3", + "configs": { + "region": "${s3_region}", # type string, optional for the command "kusion config", the aws region. + "endpoint": "${s3_endpoint}", # type string, optional, the aws endpoint. + "accessKeyID": "${s3_access_key_id}", # type string, optional for the command "kusion config", the aws access key id. + "accessKeySecret": "${s3_access_key_secret}", # type string, optional for the command "kusion config", the aws access key secret. + "bucket": "${s3_bucket}", # type string, required, the s3 bucket. + "prefix": "${s3_prefix}" # type string, optional, the prefix to store the files. + } +} +``` + +The supported environment variables are as below. + +```bash +export AWS_DEFAULT_REGION="${s3_region}" # configure region, lower priority than AWS_REGION +export AWS_REGION="${s3_region}" # configure region, higher priority than AWS_DEFAULT_REGION +export AWS_ACCESS_KEY_ID="${s3_access_key_id}" # configure accessKeyID +export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}" # configure accessKeySecret +``` + + +## Setting a Backend + +When there is a new backend or the backend configuration needs to update, use the command `kusion config set ${key} ${value}` to set a backend. A backend is identified by a unique name, and its whole configuration is made up of the backend type and its corresponding config items. + +Be attention, do not confuse backend with backend type. For example, a backend named `s3_prod` uses `s3` as its storage, the `s3_prod` is the backend, while the `s3` is the backend type. + +There are four configuration modes: + +- setting a whole backend +- setting a backend type +- setting a whole set of backend config items +- setting a backend config item + +A unique backend name is required to do the configuration. Take `s3` type backend with name `s3_prod` for an example to explain how these modes work. + +### Setting a Whole Backend + +The key to configure a whole backend is `backends.${name}`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. Enclosing the value in single quotation marks is a good choice, which can keep the format correct. + +```shell +# set a whole backend +kusion config set backends.s3_prod '{"type":"s3","configs":{"bucket":"kusion"}}' +``` + +### Setting a Backend Type + +The key to set a backend type is `backends.${name}.type`, whose value must be `local`, `oss` or `s3`. + +```shell +# set a backend type +kusion config set backends.s3_prod.type s3 +``` + +### Setting a Whole Set of Backend Config Items + +The key to set a whole set of backend config items is `backends.${name}.configs`, whose value must be the JSON marshal result in a specified format, which is determined by the backend type. The backend config must be set after the backend type, and corresponds to the backend type. + +```shell +# set a whole backend config +kusion config set backends.s3_prod.configs '{"bucket":"kusion"}' +``` + +### Setting a Backend Config Item + +The key to set a backend config item is `backends.${name}.configs.${item}`. The item name and value type both depend on the backend type. 
Like the whole backend config, the config item must be valid and set after the backend type.
+
+```shell
+# set a backend config item
+kusion config set backends.s3_prod.configs.bucket kusion
+```
+
+When executing `kusion config set`, the configuration will be stored in a local file. For security reasons, environment variables are supported for some config items, such as `password`, `accessKeyID`, `accessKeySecret`. Using environment variables rather than `kusion config set` for sensitive data is the best practice. If both are configured, the environment variables have higher priority. For details about the supported environment variables, please see above.
+
+Kusion has a default backend named `default`, which is of the `local` type with the path `$KUSION_HOME`. The `default` backend cannot be modified, that is, setting or unsetting it is not allowed. Besides, the keyword `current` is reserved by Kusion itself, so please do not use it as a backend name.
+
+## Unsetting a Backend
+
+When a backend is not in use, or the configuration is out of date, use the command `kusion config unset ${key}` to unset a backend or a specified config item. As with setting, there are four modes of unsetting:
+
+- unsetting a whole backend
+- unsetting a backend type
+- unsetting a whole set of backend config items
+- unsetting a backend config item
+
+When unsetting a whole backend, the backend must not be the current backend. When unsetting a backend type, the config items must be empty and the backend must not be the current one.
+
+Unsetting the `default` backend is forbidden.
+
+## Setting the Current Backend
+
+To avoid specifying the backend for every operation command, Kusion provides the mechanism of setting a current backend, which is then used by default. This is very useful when you execute a series of Kusion operation commands, as they usually use the same backend.
+
+Use the command `kusion config set backends.current ${name}` to set the current backend, where `name` must be an already configured backend.
+
+```shell
+# set the current backend
+kusion config set backends.current s3_prod
+```
+
+Setting the current backend to `default` is legal. Actually, if there is no backend related configuration, the current backend is the `default` backend.
+
+## Getting Backend Configuration
+
+Use the command `kusion config get ${key}` to get a whole backend configuration or a specified backend config item. The `key` is the same as for the setting and unsetting operations; the whole list can be found in the [Configuration](configuration) reference.
+
+```shell
+# get a whole backend
+kusion config get backends.s3_prod
+
+# get a specified config item
+kusion config get backends.s3_prod.configs.bucket
+```
+
+Besides, the command `kusion config list` can also be used, which returns the whole Kusion configuration, including the backend configuration.
+
+```shell
+# get the whole Kusion configuration
+kusion config list
+```
+
+## Using Backend
+
+The backend is used to store Workspace and Release. Thus, the following commands use the backend:
+
+- subcommands of `kusion workspace`: used to store the Workspace;
+- `kusion apply`, `kusion destroy`: used to store the Release.
+
+For all the commands above, the flag `--backend` is provided to specify the backend; otherwise the current backend is used. When using a backend, you usually need to provide the sensitive data via environment variables. The example is shown below.
+
+```shell
+# set environment variables of sensitive and other necessary data
+export AWS_REGION="${s3_region}"
+export AWS_ACCESS_KEY_ID="${s3_access_key_id}"
+export AWS_SECRET_ACCESS_KEY="${s3_access_key_secret}"
+
+# use current backend
+kusion apply
+
+# use a specified backend
+kusion apply --backend s3_prod
+```
diff --git a/docs_versioned_docs/version-v0.13/3-concepts/8-configuration.md b/docs_versioned_docs/version-v0.13/3-concepts/8-configuration.md
new file mode 100644
index 00000000..2ad72e6b
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/3-concepts/8-configuration.md
@@ -0,0 +1,114 @@
+---
+id: configuration
+sidebar_label: Configuration
+---
+
+# Configuration
+
+Kusion can be configured with some global settings, which are separate from the AppConfiguration written by the application developers and the workspace configurations written by the platform engineers.
+
+The configurations are only relevant to Kusion itself, and can be managed with the command `kusion config`. The configuration items are predefined and use a hierarchical format with full stops as segment separators, such as `backends.current`. For now, only the backend configurations are included.
+
+The configuration is stored in the file `${KUSION_HOME}/config.yaml`. For sensitive data, such as passwords, access key ids and secrets, setting them in the configuration file is not recommended; using the corresponding environment variables is safer.
+
+## Configuration Management
+
+Kusion provides the command `kusion config`, and its sub-commands `get`, `list`, `set`, `unset` to manage the configuration. The usage is shown below:
+
+### Get a Specified Configuration Item
+
+Use `kusion config get` to get the value of a specified configuration item; only registered items can be obtained correctly. The example is as below.
+
+```shell
+# get a configuration item
+kusion config get backends.current
+```
+
+### List the Configuration Items
+
+Use `kusion config list` to list all the Kusion configurations, where the result is in the YAML format. The example is as below.
+
+```shell
+# list all the Kusion configurations
+kusion config list
+```
+
+### Set a Specified Configuration Item
+
+Use `kusion config set` to set the value of a specified configuration item, where the type of the value is also fixed. Kusion supports `string`, `int`, `bool`, `array` and `map` as the value type, which should be conveyed in the following formats through the CLI:
+
+- `string`: the original format, such as `local-dev`, `oss-pre`;
+- `int`: convert to string, such as `3306`, `80`;
+- `bool`: convert to string, only `true` and `false` are supported;
+- `array`: convert to string with JSON marshal, such as `'["s3","oss"]'`. To preserve the format, enclosing the string content in single quotes is a good idea, or there may be unexpected errors;
+- `map`: convert to string with JSON marshal, such as `'{"path":"/etc"}'`.
+
+Besides the type, some configuration items have more setting requirements. A configuration item may depend on another, that is, it must be set after that other item. There may also be additional restrictions on the configuration values themselves, for example the valid keys of a map type value, or the allowed range of an int type value. For detailed configuration item information, please refer to the following content of this article.
+
+An example of setting a configuration item is as below.
+
+```shell
+# set a configuration item of type string
+kusion config set backends.pre.type s3
+
+# set a configuration item of type map
+kusion config set backends.prod '{"configs":{"bucket":"kusion"},"type":"s3"}'
+```
+
+### Unset a Specified Configuration Item
+
+Use `kusion config unset` to unset a specified configuration item. Note that some items have dependencies, which must be unset in the correct order. The example is as below.
+
+```shell
+# unset a specified configuration item
+kusion config unset backends.pre
+```
+
+## Backend Configurations
+
+The backend configurations define the place to store Workspace, Spec and State files. Multiple backends can be configured, and a current backend can be selected.
+
+### Available Configuration Items
+
+- **backends.current**: type `string`, the name of the currently used backend. It can be set to any configured backend name. If not set, the default local backend will be used.
+- **backends.${name}**: type `map`, a whole backend configuration, containing the type and config items, whose format is as below. It can be unset when the backend is not the current one.
+```yaml
+{
+    "type": "${backend_type}", # type string, required, support local, oss, s3.
+    "configs": ${backend_configs} # type map, optional for type local, required for the others, the specific keys depend on the type, refer to the description of backends.${name}.configs.
+}
+```
+- **backends.${name}.type**: type `string`, the backend type, support `local`, `s3` and `oss`. It can be unset when the backend is not the current one and the corresponding `backends.${name}.configs` is empty.
+- **backends.${name}.configs**: type `map`, the backend config items, whose format depends on the backend type and is as below. It must be set after `backends.${name}.type`.
+```yaml
+# type local
+{
+    "path": "${local_path}" # type string, optional, the directory to store the files. If not set, use the default path ${KUSION_HOME}.
+}
+
+# type oss
+{
+    "endpoint": "${oss_endpoint}", # type string, required, the oss endpoint.
+    "accessKeyID": "${oss_access_key_id}", # type string, optional, the oss access key id, which can be also obtained by environment variable OSS_ACCESS_KEY_ID.
+    "accessKeySecret": "${oss_access_key_secret}", # type string, optional, the oss access key secret, which can be also obtained by environment variable OSS_ACCESS_KEY_SECRET.
+    "bucket": "${oss_bucket}", # type string, required, the oss bucket.
+    "prefix": "${oss_prefix}" # type string, optional, the prefix to store the files.
+}
+
+# type s3
+{
+    "region": "${s3_region}", # type string, optional, the aws region, which can be also obtained by environment variables AWS_REGION and AWS_DEFAULT_REGION.
+    "endpoint": "${s3_endpoint}", # type string, optional, the aws endpoint.
+    "accessKeyID": "${s3_access_key_id}", # type string, optional, the aws access key id, which can be also obtained by environment variable AWS_ACCESS_KEY_ID.
+    "accessKeySecret": "${s3_access_key_secret}", # type string, optional, the aws access key secret, which can be also obtained by environment variable AWS_SECRET_ACCESS_KEY.
+    "bucket": "${s3_bucket}", # type string, required, the s3 bucket.
+    "prefix": "${s3_prefix}" # type string, optional, the prefix to store the files.
+}
+```
+- **backends.${name}.configs.path**: type `string`, the path of local type backend. It must be set after `backends.${name}.type` and which must be `local`.
+- **backends.${name}.configs.endpoint**: type `string`, the endpoint of oss or s3 type backend.
It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.accessKeyID**: type `string`, the access key id of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. For `oss`, it can be also obtained by environment variable `OSS_ACCESS_KEY_ID`; while for s3, it is `AWS_ACCESS_KEY_ID`. +- **backends.${name}.configs.accessKeySecret**: type `string`, the access key secret of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. For `oss`, it can be also obtained by environment variable `OSS_ACCESS_KEY_SECRET`; while for s3, it is `AWS_SECRET_ACCESS_KEY`. +- **backends.${name}.configs.bucket**: type `string`, the bucket of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.prefix**: type `string`, the prefix to store the files of oss or s3 type backend. It must be set after `backends.${name}.type` and which must be `oss` or `s3`. +- **backends.${name}.configs.region**: type `string`, the aws region of s3 type backend. It must be set after `backends.${name}.type` and which must be `s3`. It can be also obtained by environment variables `AWS_REGION` and `AWS_DEFAULT_REGION`, where the former is priority. diff --git a/docs_versioned_docs/version-v0.13/3-concepts/9-release.md b/docs_versioned_docs/version-v0.13/3-concepts/9-release.md new file mode 100644 index 00000000..05f27f71 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/9-release.md @@ -0,0 +1,24 @@ +--- +id: release +sidebar_label: Releases +--- + +# Release + +Release is used to indicate a single operation, triggered by `kusion apply` and `kusion destroy`, providing users with a more coherent and consistent operation experience with Kusion. Release also provides audit and rollback capabilities, which is currently under development. + +Every time `kusion apply` or `kusion destroy` is executed, it will trigger the generation of a `release` file. The combination of a `project` and `workspace` corresponds to a set of `release` files, which also relates to a set of the real application resources. The `release` file is stored in the same `backend` as the `workspace`, and the default path is `$HOME/.kusion/releases/$PROJECT_NAME/$WORKSPACE_NAME`, whose revision starts from 1 and increments. + +The release file contains the [Spec](./6-spec.md) and [State](./9-release.md#state) of an application, both of which are composed of `Resources`, representing the expected description from the configuration code and the actual state of the resources respectively. In addition, the release file also contains the information of creation and modification time, operation phase, and application metadata, etc. + +## State + +State is a record of an operation's result. It is a mapping between `resources` managed by `Kusion` and the actual infra resources. State is often used as a data source for three-way merge/diff in operations like `Apply` and `Preview`. + +A `resource` here represents an individual unit of infrastructure or application component, serving as a fundamental building block for defining and managing the actual state of your `project`. These `resources` are defined within the `State` and accurately reflect the actual states of the infrastructure. 
By providing a unified and consistent approach, `Kusion` enables seamless management of diverse resource types, encompassing Kubernetes objects and Terraform resources.Importantly, the structure of these resources in the `State` mirrors that of the `resources` in the `Spec`, ensuring coherence and facilitating efficient state management throughout the lifecycle of your `project`. + +State can be stored in many storage [backend](./7-backend.md) mediums like filesystems, S3, and OSS, etc. + +## Concurrency Control + +Release supports collaboration among multiple users and implements the concurrency control through operation `phase`. When the field of `phase` in the release file is not `succeeded` or `failed`, kusion will not be able to execute `kusion apply` or `kusion destroy` operation to the corresponding stack. For example, if a user unexpectedly exits during the `kusion apply` or `kusion destroy` process, the `phase` of the release file may be kept as `applying` or `destroying`. In this case, the user can use the command of `kusion release unlock` to unlock the release file for a specified application and workspace, setting the `phase` to `failed`. diff --git a/docs_versioned_docs/version-v0.13/3-concepts/_category_.json b/docs_versioned_docs/version-v0.13/3-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/3-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/1-overview.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/1-overview.md new file mode 100644 index 00000000..e7339ec9 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/1-overview.md @@ -0,0 +1,223 @@ +--- +id: overview +--- + +# Configuration File Overview + +Kusion consumes one or more declarative configuration files (written in KCL) that describe the application, and delivers intent to the target runtime including Kubernetes, clouds, or on-prem infrastructure. + +This documentation series walks you through the odds and ends of managing such configuration files. + +## Table of Content + +- [Configuration File Overview](#configuration-file-overview) + - [Table of Content](#table-of-content) + - [Directory Structure](#directory-structure) + - [AppConfiguration Model](#appconfiguration-model) + - [Authoring Configuration Files](#authoring-configuration-files) + - [Identifying KCL file](#identifying-kcl-file) + - [KCL Schemas and KAM](#kcl-schemas-and-kam) + - [Kusion Modules](#kusion-modules) + - [Import Statements](#import-statements) + - [Understanding kcl.mod](#understanding-kclmod) + - [Building Blocks](#building-blocks) + - [Instantiating an application](#instantiating-an-application) + - [Using `kusion init`](#using-kusion-init) + - [Using references](#using-references) + +## Directory Structure + +Kusion expects the configuration file to be placed in a certain directory structure because it might need some metadata (that is not stored in the application configuration itself) in order to proceed. + +:::info + +See [Project](../concepts/project/overview) and [Stack](../concepts/stack/overview) for more details about Project and Stack. 
+::: + +A sample multi-stack directory structure looks like the following: +``` +~/playground$ tree multi-stack-project/ +multi-stack-project/ +├── README.md +├── base +│   └── base.k +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +├── prod +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +In general, the directory structure follows a hierarchy where the top-level is the project configurations, and the sub-directories represent stack-level configurations. + +You may notice there is a `base` directory besides all the stacks. The `base` directory is not mandatory, but rather a place to store common configurations between different stacks. A common pattern we observed is to use stacks to represent different stages (dev, stage, prod, etc.) in the software development lifecycle, and/or different deployment targets (azure-eastus, aws-us-east-1, etc). A project can have as many stacks as needed. + +In practice, the applications deployed into dev and prod might very likely end up with a similar set of configurations except a few fields such as the application image (dev might be on newer versions), resource requirements (prod might require more resources), etc. + +As a general best practice, we recommend managing the common configurations in `base.k` as much as possible to minimize duplicate code. We will cover how override works in [Base and Override](base-override). + +## AppConfiguration Model + +`AppConfiguration` is the out-of-the-box model we build that describes an application. It serves as the declarative intent for a given application. + +The schema for `AppConfiguration` is defined in the [KusionStack/kam](https://github.com/KusionStack/kam/blob/main/v1/app_configuration.k) repository. It is designed as a unified, application-centric model that encapsulates the comprehensive configuration details and in the meantime, hides the complexity of the infrastructure as much as possible. + +`AppConfiguration` consists of multiple sub-components that each represent either the application workload itself, its dependencies (in the form of [Kusion Modules](../concepts/module/overview)), relevant workflows or operational expectations. We will deep dive into the details on how to author each of these elements in this upcoming documentation series. + +For more details on the `AppConfiguration`, please refer to the [design documentation](../concepts/app-configuration). + +## Authoring Configuration Files + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open-source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +### Identifying KCL file + +KCL files are identified with `.k` suffix in the filename. + +### KCL Schemas and KAM + +Similar to most modern General Programming Languages (GPLs), KCL provide packages that are used to organize collections of related KCL source files into modular and re-usable units. + +In the context of Kusion, we abstracted a core set of KCL Schemas (such as the aforementioned `AppConfiguration`, `Workload`, `Container`, etc)that represent the concepts that we believe that are relatively universal and developer-friendly, also known as [Kusion Application Model](https://github.com/KusionStack/kam), or KAM. 
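+For orientation, the minimal sketch below shows how these KAM schemas nest inside one another. It is only a preview: it uses the `kam` and `service` packages referenced throughout this document, the image value is the sample image used elsewhere in this walkthrough, and every attribute shown here is covered in detail in the later sections.
+
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+# An AppConfiguration wraps a workload, which in turn wraps one or more containers.
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+            }
+        }
+    }
+}
+```
+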
+
+### Kusion Modules
+
+To extend the capabilities beyond the core KAM model, we use a concept known as [Kusion Modules](../concepts/module/overview) to define components that best abstract the capabilities needed during application delivery. We provide a collection of official out-of-the-box Kusion Modules that represent the most common capabilities. They are maintained in [KusionStack's GitHub container registry](https://github.com/orgs/KusionStack/packages). When authoring an application configuration file, you can simply declare said Kusion Modules as dependencies and import them to declare ship-time capabilities that the application requires.
+
+If the modules in the KusionStack container registry do not meet the needs of your applications, Kusion provides the necessary mechanisms to extend with custom-built Kusion Modules. You can always create and publish your own module, then import the new module in your application configuration written in KCL.
+
+For the steps to develop your own module, please refer to the Module developer guide.
+
+### Import Statements
+
+An example of the import looks like the following:
+```
+### import from the official kam package
+import kam.v1.app_configuration as ac
+
+### import kusion modules
+import service
+import service.container as c
+import monitoring as m
+import network as n
+```
+
+Take `import kam.v1.app_configuration as ac` as an example: the `.v1.app_configuration` part after `import kam` represents the relative path of a specific schema to import. In this case, the `AppConfiguration` schema is defined under the `v1/app_configuration` directory in the `kam` package.
+
+### Understanding kcl.mod
+
+Similar to the concept of `go.mod`, Kusion uses `kcl.mod` as the source of truth to manage metadata (such as package name, dependencies, etc.) for the current package. Kusion will also auto-generate a `kcl.mod.lock` as the dependency lock file.
+
+The most common usage for `kcl.mod` is to manage the dependencies of your application configuration file.
+
+:::info
+
+Please note this `kcl.mod` will be automatically generated if you are using `kusion init` to initialize a project with a template. You will only need to modify this file if you are modifying the project metadata outside the initialization process, such as upgrading the dependency version or adding a new dependency altogether, etc.
+:::
+
+There are 3 sections in a `kcl.mod` file:
+- `package`, representing the metadata for the current package.
+- `dependencies`, describing the packages the current package depends on. Supports referencing either a git repository or an OCI artifact.
+- `profile`, defining the behavior for Kusion. In the example below, it describes the list of files Kusion should look for when parsing the application configuration.
+
+An example of `kcl.mod`:
+```
+[package]
+name = "multi-stack-project"
+edition = "0.5.0"
+version = "0.1.0"
+
+[dependencies]
+monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.1.0" }
+kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.1.0" }
+# Uncomment the line below to use your own modified module
+# my-module = { oci = "oci://ghcr.io/my-repository/my-package", tag = "my-version" }
+
+[profile]
+entries = ["../base/base.k", "main.k"]
+```
+
+### Building Blocks
+
+Configuration files consist of building blocks that are made of instances of schemas. An `AppConfiguration` instance consists of several child schemas, most of which are optional. The only mandatory one is the `workload` instance.
We will take a closer look in the [workload walkthrough](workload). The order of the building blocks does NOT matter. + +The major building blocks as of version `0.12.0`: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container {} + ... + } + secrets: {} + ... + } + # optional dependencies, usually expressed in kusion modules + accessories: { + ... + } + ... +} +``` + +We will deep dive into each one of the building blocks in this documentation series. + +### Instantiating an application + +In Kusion's out-of-the-box experience, an application is identified with an instance of `AppConfiguration`. You may have more than one application in the same project or stack. + +Here's an example of a configuration that can be consumed by Kusion (assuming it is placed inside the proper directory structure that includes project and stack configurations, with a `kcl.mod` present): + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +gocity: ac.AppConfiguration { + workload: service.Service { + containers: { + "gocity": c.Container { + image = "howieyuen/gocity:latest" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +Don't worry about what `workload` or `n.Network` stand for at the moment. We will deep dive into each one of them in this upcoming documentation series. + +### Using `kusion init` + +Kusion offers a `kusion init` sub-command which initializes a new project using a pre-built template, which saves you from the hassle of manually building the aforementioned directory structure that Kusion expects. + +There is a built-in template `quickstart` in the Kusion binary that can be used offline. + +The pre-built templates are meant to help you get off the ground quickly with some simple out-of-the-box examples. You can refer to the [QuickStart documentation](../getting-started/deliver-quickstart) for some step-by-step tutorials. + +### Using references + +The reference documentation for the `kam` package and the official Kusion Modules is located in [Reference](../reference/modules/developer-schemas/app-configuration). + +If you are using them out of the box, the reference documentation provides a comprehensive view for each schema involved, including all the attribute names and description, their types, default value if any, and whether a particular attribute is required or not. There will also be an example attached to each schema reference. + +We will also deep dive into some common examples in the upcoming sections. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/2-kcl-basics.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/2-kcl-basics.md new file mode 100644 index 00000000..aaa80366 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/2-kcl-basics.md @@ -0,0 +1,144 @@ +--- +id: kcl-basics +--- + +# KCL Basics + +## Table of Content +- [Variable assignments](#variable-assignments) +- [Common built-in types](#common-built-in-types) +- [Lists and maps](#lists-and-maps) +- [Conditional statements](#conditional-statements) +- [The : and = operator](#the--and--operator) +- [Advanced KCL capabilities](#advanced-kcl-capabilities) + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. 
KCL is an open source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +## Variable assignments + +There are two ways to initialize a variable in KCL. You can either use the `:` operator or the `=` operator. We will discuss the difference between them in [this section later](#the--and--operator). + +Here are the two ways to create a variable and initialize it: +``` +foo = "Foo" # Declare a variable named `foo` and its value is a string literal "Foo" +bar: "Bar" # Declare a variable named `bar` and its value is a string literal "Bar" +``` + +You will be able to override a variable assignment via the `=` operator. We will discuss this in depth in the [`:` and `=` operator section](#the--and--operator). + +## Common built-in types + +KCL supports `int`, `float`, `bool` and `string` as the built-in types. + +Other types are defined in the packages that are imported into the application configuration files. One such example would be the `AppConfiguration` object (or `Container`, `Probe`, `Port` object, etc) that are defined in the `kam` repository. + +## Lists and maps + +Lists are represented using the `[]` notation. +An example of lists: +``` +list0 = [1, 2, 3] +list1 = [4, 5, 6] +joined_list = list0 + list1 # [1, 2, 3, 4, 5, 6] +``` + +Maps are represented using the `{}` notation. +An example of maps: +``` +a = {"one" = 1, "two" = 2, "three" = 3} +b = {'one' = 1, 'two' = 2, 'three' = 3} +assert a == b # True +assert len(a) == 3 # True +``` + +## Conditional statements +You can also use basic control flow statements when writing the configuration file. + +An example that sets the value of `replicas` conditionally based on the value of `containers.myapp.resources.cpu`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 if containers.myapp.resources.cpu == "500m" else 2 + } +} +``` + +For more details on KCL's control flow statements, please refer to the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#control-flow-statements). + +## The `:` and `=` operator + +You might have noticed there is a mixed usage of the `:` and `=` in the samples above. + +:::info + +**TLDR: The recommendation is to use `:` in the common configurations, and `=` for override in the environment-specific configurations.** +::: + +In KCL: +- `:` represents a union-ed value assignment. In the pattern `identifier: E` or `identifier: T E`, the value of the expression `E` with optional type annotation `T` will be merged and union-ed into the element value. +- `=` represents a value override. In the pattern `identifier = E` or `identifier = T E`, The value of the expression `E` with optional type annotation `T` will override the `identifier` attribute value. + +Let's take a look at an example: +``` +# This is one configuration that will be merged. +config: Config { + data.d1 = 1 +} +# This is another configuration that will be merged. 
+config: Config {
+    data.d2 = 2
+}
+```
+
+The above is equivalent to the snippet below since the two expressions for `config` get merged/union-ed into one:
+```
+config: Config {
+    data.d1 = 1
+    data.d2 = 2
+}
+```
+
+whereas using the `=` operator will result in a different outcome:
+```
+# This is the first configuration.
+config = Config {
+    data.d1 = 1
+}
+# This is the second configuration, which will override the prior one.
+config = Config {
+    data.d2 = 2
+}
+```
+
+The config above results in:
+```
+config: Config {
+    data.d2 = 2
+}
+```
+
+Please note that the `:` attribute operator represents an idempotent merge operation, and an error will be thrown when the values that need to be merged conflict with each other.
+
+```
+data0 = {id: 1} | {id: 2} # Error: conflicting values between {'id': 2} and {'id': 1}
+data1 = {id: 1} | {id = 2} # Ok, the value of `data1` is {"id": 2}
+```
+
+More about the `:` and `=` operators can be found in the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#config-operations).
+
+## Advanced KCL capabilities
+
+For more advanced KCL capabilities, please visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl).
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/3-base-override.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/3-base-override.md
new file mode 100644
index 00000000..f14af112
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/3-base-override.md
@@ -0,0 +1,94 @@
+---
+id: base-override
+---
+
+# Base and Override
+
+In practice, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets, be it different environments in the SDLC, or different clouds, regions or runtimes for cost/regulation/performance or disaster recovery related reasons.
+
+In that context, we advocate for a pattern where you can leverage some Kusion and KCL features to minimize the amount of duplicate configurations, by separating the common base application configuration and environment-specific ones.
+
+:::info
+
+The file names in the examples below don't matter as long as they are called out and appear in the correct order in the `entries` field (the field is a list) in `kcl.mod`. The files with common configurations should appear first in the list and stack-specific ones last. The latter takes precedence.
+
+The configurations also don't have to be placed into a single `.k` file. For complex projects, they can be broken down into smaller organized `.k` files for better readability.
+:::
+
+Base configuration defined in `base/base.k`:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import network.network as n
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: ""
+                resources: {
+                    "cpu": "500m"
+                    "memory": "512Mi"
+                }
+            }
+        }
+        replicas: 1
+    }
+    accessories: {
+        "network": n.Network {
+            ports: [
+                n.Port {
+                    port: 80
+                    public: True
+                }
+            ]
+        }
+    }
+}
+```
+
+Environment-specific configuration defined in `dev/main.k`:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+# main.k declares customized configurations for dev stack.
+myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + # dev stack has different app configuration from the base + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } + } + replicas = 2 + } +} +``` + +Alternatively, you could locate a specific property (in this case below, the `Container` object) in the `AppConfiguration` object using the dot selector shorthand(such as `workload.containers.myapp` or `workload.replicas` below): +``` +import kam.v1.app_configuration as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload.replicas = 2 + workload.containers.myapp: { + # dev stack has different app configuration + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } +} +``` +This is especially useful when the application configuration is complex but the override is relatively straightforward. + +The two examples above are equivalent when overriding the base. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/4-workload.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/4-workload.md new file mode 100644 index 00000000..2b880df0 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/4-workload.md @@ -0,0 +1,373 @@ +# Workload + +The `workload` attribute in the `AppConfiguration` instance is used to describe the specification for the application workload. The application workload generally represents the computing component for the application. + +A `workload` maps to an `AppConfiguration` instance 1:1. If there are more than one workload, they should be considered different applications. + +## Table of Content +- [Import](#import) +- [Types of workloads](#types-of-workloads) +- [Configure containers](#configure-containers) + - [Application image](#application-image) + - [Resource Requirements](#resource-requirements) + - [Health Probes](#health-probes) + - [Lifecycle Hooks](#lifecycle-hooks) + - [Create Files](#create-files) + - [Customize container initialization](#customize-container-initialization) +- [Configure Replicas](#configure-replicas) +- [Differences between Service and Job](#differences-between-service-and-job) +- [Workload References](#workload-references) + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.container.probe as p +import service.container.lifecycle as lc +``` + +## Types of Workloads + +There are currently two types of workloads: + +- `Service`, representing a long-running, scalable workload type that should "never" go down and respond to short-lived latency-sensitive requests. This workload type is commonly used for web applications and services that expose APIs. +- `Job`, representing batch tasks that take from a few seconds to days to complete and then stop. These are commonly used for batch processing that is less sensitive to short-term performance fluctuations. 
+ +To instantiate a `Service`: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service {} +} +``` + +To instantiate a `Job`: +``` +import kam.v1.app_configuration as ac +import job +import job.container as c + +myapp: ac.AppConfiguration { + workload: job.Job {} +} +``` + +Of course, the `AppConfiguration` instances above is not sufficient to describe an application. We still need to provide more details in the `workload` section. + +## Configure containers + +Kusion is built on top of cloud-native philosophies. One of which is that applications should run as loosely coupled microservices on abstract and self-contained software units, such as containers. + +The `containers` attribute in a workload instance is used to define the behavior for the containers that run application workload. The `containers` attribute is a map, from the name of the container to the `catalog.models.schema.v1.workload.container.Container` Object which includes the container configurations. + +:::info + +The name of the container is in the context of the configuration file, so you could refer to it later. It's not referring to the name of the container in the Kubernetes cluster (or any other runtime). +::: + +Everything defined in the `containers` attribute is considered an application container, as opposed to a sidecar container. Sidecar containers will be introduced in a different attribute in a future version. + +In most of the cases, only one application container is needed. Ideally, we recommend mapping an `AppConfiguration` instance to a microservice in the microservice terminology. + +We will walk through the details of configuring a container using an example of the `Service` type. + +To add an application container: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container {} + } + } +} +``` + +### Application image + +The `image` attribute in the `Container` schema specifies the application image to run. This is the only required field in the `Container` schema. + +To specify an application image: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + } + } +} +``` + +### Resource Requirements + +The `resources` attribute in the `Container` schema specifies the application resource requirements such as cpu and memory. + +You can specify an upper limit (which maps to resource limits only) or a range as the resource requirements (which maps to resource requests and limits in Kubernetes). + +To specify an upper bound (only resource limits): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c + +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # ... 
+            }
+        }
+    }
+}
+```
+
+To specify a range (both resource requests and limits):
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # Sets requests to cpu=250m and memory=256Mi
+                # Sets limits to cpu=500m and memory=512Mi
+                resources: {
+                    "cpu": "250m-500m"
+                    "memory": "256Mi-512Mi"
+                }
+                # ...
+            }
+        }
+    }
+}
+```
+
+### Health Probes
+
+There are three types of `Probe` defined in a `Container`:
+
+- `livenessProbe` - used to determine if the container is healthy and running
+- `readinessProbe` - used to determine if the container is ready to accept traffic
+- `startupProbe` - used to determine if the container has started properly. Liveness and readiness probes don't start until `startupProbe` succeeds. Commonly used for containers that take a while to start
+
+The probes are optional. You can only have one Probe of each kind for a given `Container`.
+
+To configure an `Http` type `readinessProbe` that probes the health via an HTTP request and an `Exec` type `livenessProbe` that executes a command:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import service.container.probe as p
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+                # Configure an Http type readiness probe at /healthz
+                readinessProbe: p.Probe {
+                    probeHandler: p.Http {
+                        url: "/healthz"
+                    }
+                    initialDelaySeconds: 10
+                    timeoutSeconds: 5
+                    periodSeconds: 15
+                    successThreshold: 3
+                    failureThreshold: 1
+                }
+                # Configure an Exec type liveness probe that executes probe.sh
+                livenessProbe: p.Probe {
+                    probeHandler: p.Exec {
+                        command: ["probe.sh"]
+                    }
+                    initialDelaySeconds: 10
+                }
+            }
+        }
+    }
+}
+```
+
+### Lifecycle Hooks
+
+You can also configure lifecycle hooks that trigger in response to container lifecycle events such as liveness/startup probe failure, preemption, resource contention, etc.
+
+There are two types currently supported:
+
+- `PreStop` - triggers before the container is terminated.
+- `PostStart` - triggers after the container is initialized.
+
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import service.container.probe as p
+import service.container.lifecycle as lc
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+                # Configure lifecycle hooks
+                lifecycle: lc.Lifecycle {
+                    # Configures an Exec type pre-stop hook that executes preStop.sh
+                    preStop: p.Exec {
+                        command: ["preStop.sh"]
+                    }
+                    # Configures an Http type post-start hook at /post-start
+                    postStart: p.Http {
+                        url: "/post-start"
+                    }
+                }
+            }
+        }
+    }
+}
+```
+
+### Create Files
+
+You can also create files on-demand during container initialization.
+
+To create a custom file and mount it to `/home/admin/my-file` when the container starts:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+                # Creates a file during container startup
+                files: {
+                    "/home/admin/my-file": c.FileSpec {
+                        content: "some file contents"
+                        mode: "0777"
+                    }
+                }
+            }
+        }
+    }
+}
+```
+
+### Customize container initialization
+
+You can also customize the container entrypoint via `command`, `args`, and `workingDir`. These should **most likely not be required**. In most of the cases, the entrypoint details should be baked into the application image itself.
+
+To customize the container entrypoint:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # ...
+                # This command will overwrite the entrypoint set in the image Dockerfile
+                command: ["/usr/local/bin/my-init-script.sh"]
+                # Extra arguments appended to the command defined above
+                args: [
+                    "--log-dir=/home/my-app/logs"
+                    "--timeout=60s"
+                ]
+                # Run the command as defined above, in the directory "/tmp"
+                workingDir: "/tmp"
+            }
+        }
+    }
+}
+```
+
+## Configure Replicas
+
+The `replicas` field in the `workload` instance describes the number of identical copies to run at the same time. It is generally recommended to have multiple replicas in production environments to eliminate any single point of failure. In Kubernetes, this corresponds to the `spec.replicas` field in the relevant workload manifests.
+
+To configure a workload to have a replica count of 3:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            # ...
+        }
+        replicas: 3
+        # ...
+    }
+    # ...
+}
+```
+
+## Differences between Service and Job
+
+The two types of workloads, namely `Service` and `Job`, share a majority of the attributes with some minor differences.
+
+### Exposure
+
+A `Service` usually represents a long-running, scalable workload that responds to short-lived latency-sensitive requests and never goes down. Hence, a `Service` has an additional attribute that determines how it is exposed and can be accessed. A `Job` does NOT have the option to be exposed. We will explore more in the [application networking walkthrough](networking).
+
+### Job Schedule
+
+A `Job` can be configured to run in a recurring manner. In this case, the job will have a cron-format schedule that represents its recurring schedule.
+
+To configure a job to run every hour:
+```
+import kam.v1.app_configuration as ac
+import job
+import job.container as c
+
+myjob: ac.AppConfiguration {
+    workload: job.Job {
+        containers: {
+            "busybox": c.Container {
+                image: "busybox:1.28"
+                # Run the following command as defined
+                command: ["/bin/sh", "-c", "echo hello"]
+            }
+        }
+        # Run every hour.
+        schedule: "0 * * * *"
+    }
+}
+```
+
+## Workload References
+
+You can find workload references [here](../reference/modules/developer-schemas/workload/service).
+
+You can find workload schema source [here](https://github.com/KusionStack/catalog/tree/main/models/schema/v1/workload).
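+
+As a quick recap of this walkthrough, the sketch below combines the pieces covered above (image, resource requirements, a readiness probe and replicas) into a single `Service` workload. It only uses schemas and attributes already shown in this document; the concrete values are illustrative.
+
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import service.container.probe as p
+
+myapp: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "myapp": c.Container {
+                image: "gcr.io/google-samples/gb-frontend:v5"
+                # Requests cpu=250m and memory=256Mi, limits cpu=500m and memory=512Mi
+                resources: {
+                    "cpu": "250m-500m"
+                    "memory": "256Mi-512Mi"
+                }
+                # Http type readiness probe at /healthz
+                readinessProbe: p.Probe {
+                    probeHandler: p.Http {
+                        url: "/healthz"
+                    }
+                    initialDelaySeconds: 10
+                }
+            }
+        }
+        # Run three identical replicas
+        replicas: 3
+    }
+}
+```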
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/5-networking.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/5-networking.md new file mode 100644 index 00000000..adaa9904 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/5-networking.md @@ -0,0 +1,174 @@ +--- +id: networking +--- + +# Application Networking + +In addition to configuring application's [container specifications](workload#configure-containers), you can also configure its networking behaviors, including how to expose the application and how it can be accessed. You can specify a `network` module in the `accessories` field in `AppConfiguration` to achieve that. + +In future versions, this will also include ingress-based routing strategy and DNS configurations. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `network` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +``` + +The `kcl.mod` must contain reference to the network module: +``` +#... + +[dependencies] +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +#... +``` + +## Private vs Public Access + +Private network access means the service can only be access from within the target cluster. + +Public access is implemented using public load balancers on the cloud. This generally requires a Kubernetes cluster that is running on the cloud with a vendor-specific service controller. + +Any ports defined default to private access unless explicitly specified. + +To expose port 80 to be accessed privately: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +To expose port 80 to be accessed publicly: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + } +} +``` + +:::info +The CSP (Cloud Service Provider) used to provide load balancers is defined by platform engineers in workspace. +::: + +## Mapping ports + +To expose a port `80` that maps to a different port `8088` on the container: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + } + ] + } + } +} +``` + +## Exposing multiple ports + +You can also expose multiple ports and configure them separately. + +To expose port 80 to be accessed publicly, and port 9099 for private access (to be scraped by Prometheus, for example): +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... 
+ } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + n.Port { + port: 9099 + } + ] + } + } +} +``` + +## Choosing protocol + +To expose a port using the `UDP` protocol: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + targetPort: 8088 + protocol: "UDP" + } + ] + } + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/6-database.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/6-database.md new file mode 100644 index 00000000..6a8dedab --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/6-database.md @@ -0,0 +1,467 @@ +--- +id: databse +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Managed Databases + +You could also specify a database needed for the application. That can be achieved via a `mysql` or a `postgres` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. + +You can currently have several databases with **different database names** for an application at the same time. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `mysql` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](./1-overview.md#configuration-file-overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import mysql +import postgres +``` + +The `kcl.mod` must contain reference to the `mysql` module or `postgres` module: +``` +#... + +[dependencies] +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +postgres = { oci = "oci://ghcr.io/kusionstack/postgres", tag = "0.2.0" } +#... +``` + +## Types of Database offerings + +As of version 0.11.0, Kusion supports the following database offerings on the cloud: +- MySQL and PostgreSQL Relational Database Service (RDS) on [AWS](https://aws.amazon.com/rds/) +- MySQL and PostgreSQL Relational Database Service (RDS) on [AliCloud](https://www.alibabacloud.com/product/databases) + +More database types on more cloud vendors will be added in the future. + +Alternatively, Kusion also supports creating a database at `localhost` for local testing needs. A local database is quicker to stand up and easier to manage. It also eliminates the need for an account and any relevant costs with the cloud providers in the case that a local testing environment is sufficient. + +:::info +You do need a local Kubernetes cluster to run the local database workloads. You can refer to [Minikube](https://minikube.sigs.k8s.io/docs/start/) or [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) to get started. +To see an end-to-end use case for standing up a local testing environment including a local database, please refer to the [Kusion Quickstart](../2-getting-started/2-deliver-quickstart.md). +::: + +## Cloud Credentials and Permissions + +Kusion provisions databases on the cloud via [terraform](https://www.terraform.io/) providers. For it to create _any_ cloud resources, it requires a set of credentials that belongs to an account that has the appropriate write access so the terraform provider can be initialized properly. 
+ +For AWS, the environment variables needed: +``` +export AWS_REGION=us-east-1 # replace it with your region +export AWS_ACCESS_KEY_ID="xxxxxxxxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="xxxxxxx" # replace it with your SecretKey +``` + +For AliCloud, the environment variables needed: +``` +export ALICLOUD_REGION=cn-shanghai # replace it with your region +export ALICLOUD_ACCESS_KEY="xxxxxxxxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="xxxxxxxxx" # replace it with your SecretKey +``` + +The user account that owns these credentials would need to have the proper permission policies attached to create databases and security groups. If you are using the cloud-managed policies, the policies needed to provision a database and configure firewall rules are listed below. + +For AWS: +- `AmazonVPCFullAccess` for creating and managing database firewall rules via security group +- `AmazonRDSFullAccess` for creating and managing RDS instances + +For AliCloud: +- `AliyunVPCFullAccess` for creating and managing database firewall rules via security group +- `AliyunRDSFullAccess` for creating and managing RDS instances + +Alternatively, you can use customer managed policies if the cloud provider built-in policies don't meet your needs. The list of permissions needed are in the [AmazonRDSFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonRDSFullAccess.html#AmazonRDSFullAccess-json) and [AmazonVPCFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonVPCFullAccess.html). It will most likely be a subset of the permissions in the policy documents. + +## Configure Database + +### Provision a Cloud Database + +Assuming the steps in the [Cloud Credentials and Permissions](#cloud-credentials-and-permissions) section is setup properly, you can now provision cloud databases via Kusion. + +#### AWS RDS Instance +To provision an AWS RDS instance with MySQL v8.0 or PostgreSQL v14.0, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# MySQL configurations for AWS RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + aws: + version: 5.0.1 + source: hashicorp/aws + region: us-east-1 # Please replace with your own aws provider region + +# PostgreSQL configurations for AWS RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... 
+ accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +It's highly recommended to replace `0.0.0.0/0` and closely manage the whitelist of IPs that can access the database for security purposes. The `0.0.0.0/0` in the example above or if `securityIPs` is omitted altogether will allow connections from anywhere which would typically be a security bad practice. + +The `instanceType` field determines the computation and memory capacity of the RDS instance. The `db.t3.micro` instance type in the example above represents the `db.t3` instance class with a size of `micro`. In the same `db.t3` instance family there are also `db.t3.small`, `db.t3.medium`, `db.t3.2xlarge`, etc. + +The full list of supported `instanceType` values can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Support). + +You can also adjust the storage capacity for the database instance by changing the `size` field which is storage size measured in gigabytes. The minimum is 20. More details can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#Concepts.Storage.GeneralSSD). + +#### AliCloud RDS Instance + +To provision an Alicloud RDS instance with MySQL or PostgreSQL, you can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. Note that AliCloud RDS has several additional fields such as `category`, `subnetID` and `privateRouting`: + + + + +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# MySQL configurations for Alicloud RDS +modules: + kusionstack/mysql@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-mysql" +``` + +```mdx-code-block + + +``` +```yaml +runtimes: + terraform: + random: + version: 3.5.1 + source: hashicorp/random + alicloud: + version: 1.209.1 + source: aliyun/alicloud + region: cn-beijing # Please replace with your own alicloud provider region + +# PostgreSQL configurations for Alicloud RDS +modules: + kusionstack/postgres@0.1.0: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + suffix: "-postgres" +``` + +```mdx-code-block + + +``` + +For KCL configuration file declarations: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "cloud" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +We will walkthrough `subnetID` and `privateRouting` in the [Configure Network Access](#configure-network-access) section. 
+ +The full list of supported `instanceType` values can be found in: +- [MySQL instance types(x86)](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/primary-apsaradb-rds-for-mysql-instance-types#concept-2096487) +- [PostgreSQL instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-postgresql/primary-apsaradb-rds-for-postgresql-instance-types#concept-2096578) + +### Local Database + +To deploy a local database with MySQL v8.0 or PostgreSQL v14.0: + + + + +```python +wordpress: ac.AppConfiguration { + # ... + accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } + } +} +``` + +```mdx-code-block + + +``` + +```python +pgadmin: ac.AppConfiguration { + # ... + accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } + } +} +``` + +```mdx-code-block + + +``` + +## Database Credentials + +There is no need to manage the database credentials manually. Kusion will automatically generate a random password, set it as the credential when creating the database, and then inject the hostname, username and password into the application runtime. + +You have the option to BYO (Bring Your Own) username for the database credential by specifying the `username` attribute in the `workspace.yaml`: +```yaml +modules: + kusionstack/mysql@0.1.0: + default: + # ... + username: "my_username" +``` + +You **cannot** bring your own password. The password will always be managed by Kusion automatically. + +The database credentials are injected into the environment variables of the application container. You can access them via the following env vars: +``` +# env | grep KUSION_DB +KUSION_DB_HOST_WORDPRESS_MYSQL=wordpress.xxxxxxxx.us-east-1.rds.amazonaws.com +KUSION_DB_USERNAME_WORDPRESS_MYSQL=xxxxxxxxx +KUSION_DB_PASSWORD_WORDPRESS_MYSQL=xxxxxxxxx +``` + +:::info +More details about the environment of database credentials injected by Kusion can be found at [mysql credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity) and [postgres credentials and connectivity](../6-reference/2-modules/1-developer-schemas/database/postgres.md#credentials-and-connectivity) +::: + +You can use these environment variables out of the box. Or most likely, your application might retrieve the connection details from a different set of environment variables. In that case, you can map the kusion environment variables to the ones expected by your application using the `$()` expression. + +This example below will assign the value of `KUSION_DB_HOST_WORDPRESS_MYSQL` into `WORDPRESS_DB_HOST`, `KUSION_DB_USERNAME_WORDPRESS_MYSQL` into `WORDPRESS_DB_USER`, likewise for `KUSION_DB_PASSWORD_WORDPRESS_MYSQL` and `WORDPRESS_DB_PASSWORD`: +``` +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3-apache" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + } + # ... + } + } + # ... + } + accessories: { + # ... + } +} +``` + +## Configure Network Access + +You can also optionally configure the network access to the database as part of the `AppConfiguration`. This is highly recommended because it dramatically increases the security posture of your cloud environment in the means of least privilege principle. 
+
+The `securityIPs` field in the `Database` schema declares the list of network addresses that are allowed to access the database. The network addresses are in [CIDR notation](https://aws.amazon.com/what-is/cidr/) and can be either a private IP range ([RFC-1918](https://datatracker.ietf.org/doc/html/rfc1918) and [RFC-6598](https://datatracker.ietf.org/doc/html/rfc6598) addresses) or a public one.
+
+If the database needs to be accessed from a public location (which should most likely not be the case in a production environment), `securityIPs` needs to include the public IP address of the traffic source (for instance, when the RDS database needs to be accessed from your computer).
+
+To configure AWS RDS to restrict network access to a VPC with a CIDR of `10.0.1.0/24` and a public IP of `103.192.227.125`:
+
+```yaml
+modules:
+  kusionstack/mysql@0.1.0:
+    default:
+      cloud: aws
+      # ...
+      securityIPs:
+        - "10.0.1.0/24"
+        - "103.192.227.125/32"
+```
+
+If `securityIPs` is omitted, the default behavior of the database firewall settings may differ depending on the cloud provider.
+
+### Subnet ID
+
+On AWS, you have the option to launch the RDS instance inside a specific VPC if a `subnetID` is present in the application configuration. If `subnetID` is not provided, the RDS instance will be created in the default VPC for that account. However, the recommendation is to self-manage your VPCs to provide better isolation from a network security perspective.
+
+On AliCloud, the `subnetID` is required. The concept of a subnet maps to a VSwitch on AliCloud.
+
+To place the RDS instance into a specific VPC on AliCloud:
+
+```yaml
+modules:
+  kusionstack/mysql@0.1.0:
+    default:
+      cloud: alicloud
+      # ...
+      subnetID: "subnet-xxxxxxxxxxxxxxxx"
+```
+
+### Private Routing
+
+There is an option to enforce private routing on certain cloud providers if both the workload and the database are running on the cloud.
+
+On AliCloud, you can set the `privateRouting` flag to `True`. The generated database host will then be a private FQDN that is only resolvable and accessible from within the AliCloud VPCs. Setting the `privateRouting` flag to `True` when the cloud provider is AWS is a no-op.
+
+To enforce private routing on AliCloud:
+
+```yaml
+modules:
+  kusionstack/mysql@0.1.0:
+    default:
+      cloud: alicloud
+      # ...
+      privateRouting: true
+```
+
+Kusion will then generate a private FQDN and inject it into the application runtime as the environment variable `KUSION_DB_HOST_` for the application to use. A complete list of Kusion-managed environment variables for the MySQL database can be found [here](../6-reference/2-modules/1-developer-schemas/database/mysql.md#credentials-and-connectivity).
+
+Otherwise, when the workload connects to the database using the public FQDN, the route depends on the cloud provider's routing preference. The options are generally either:
+- Travel as far as possible on the cloud provider's global backbone network (also referred to as cold-potato routing), or
+- Egress as early as possible to the public Internet and re-enter the cloud provider's data center later (also referred to as hot-potato routing)
+
+The former generally has better performance but is also more expensive.
+
+You can find a good overview on the [AWS Blog](https://aws.amazon.com/blogs/architecture/internet-routing-and-traffic-engineering/) or on [Microsoft Learn](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/routing-preference-overview).
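+
+To recap, a workspace configuration sketch that combines the network-related options covered in this section for an AliCloud RDS instance might look like the following; all values are illustrative placeholders taken from the examples above and should be replaced with your own:
+
+```yaml
+modules:
+  kusionstack/mysql@0.1.0:
+    default:
+      cloud: alicloud
+      size: 20
+      instanceType: mysql.n2.serverless.1c
+      category: serverless_basic
+      # Keep database traffic on the private AliCloud network
+      privateRouting: true
+      # The VSwitch (subnet) the RDS instance is provisioned into
+      subnetID: "subnet-xxxxxxxxxxxxxxxx"
+      # Only allow access from the application VPC CIDR
+      securityIPs:
+        - "10.0.1.0/24"
+```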
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/7-secret.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/7-secret.md new file mode 100644 index 00000000..db1d576e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/7-secret.md @@ -0,0 +1,251 @@ +--- +id: secret +--- + +# Secrets + +Secrets are used to store sensitive data like passwords, API keys, TLS certificates, tokens, or other credentials. Kusion provides multiple secret types, and makes it easy to be consumed in containers. + +For application dependent cloud resources that are managed by Kusion, their credentials are automatically managed by Kusion (generated and injected into application runtime environment variable). You shouldn't have to manually create those. + +## Using secrets in workload + +Secrets must be defined in AppConfiguration. The values can be generated by Kusion or reference existing secrets stored in third-party vault. Secrets can be consumed in containers by referencing them through the `secret:///` URI syntax. + +### Consume secret in an environment variable + +You can consume the data in Secrets as environment variable in your container. For example the db container uses an environment variable to set the root password. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampledb: ac.AppConfiguration { + workload: service.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume db-root-password secret in environment + "ROOT_PASSWORD": "secret://db-root-password/token" + } + } + } + # Secrets used to generate token + secrets: { + "init-info": sec.Secret { + type: "token" + } + } + } +} +``` + +The example shows the secret `root-password` being consumed as an environment variable in the db container. The secret is of type token and will automatically be generated at runtime by Kusion. + +### Consume all secret keys as environment variables + +Sometimes your secret contains multiple data that need to be consumed as environment variables. The example below shows how to consume all the values in a secret as environment variables named after the keys. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampledb: ac.AppConfiguration { + workload: service.Service { + containers: { + "db": c.Container { + image: "mysql" + env: { + # Consume all init-info secret keys as environment variables + "secret://init-info": "" + } + } + } + # Secrets used to init mysql instance + secrets: { + "init-info": sec.Secret { + type: "opaque" + data: { + "ROOT_PASSWORD": "admin" + } + } + } + } +} +``` + +This will set the environment variable "ROOT_PASSWORD" to the value "admin" in the db container. + +## Types of secrets + +Kusion provides multiple types of secrets to application developers. + +1. Basic: Used to generate and/or store usernames and passwords. +2. Token: Used to generate and/or store secret strings for password. +3. Opaque: A generic secret that can store arbitrary user-defined data. +4. Certificate: Used to store a certificate and its associated key that are typically used for TLS. +5. External: Used to retrieve secret form third-party vault. + +### Basic secrets + +Basic secrets are defined in the secrets block with the type "basic". 
+ +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "auth-info": sec.Secret { + type: "basic" + data: { + "username": "admin" + "password": "******" + } + } + } + } +} +``` + +The basic secret type is typically used for basic authentication. The key names must be username and password. If one or both of the fields are defined with a non-empty string, those values will be used. If the empty string, the default value, is used Acorn will generate random values for one or both. + +### Token secrets + +Token secrets are useful for generating a password or secure string used for passwords when the user is already known or not required. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "api-token": sec.Secret { + type: "token" + data: { + "token": "" + } + } + } + } +} +``` + +The token secret type must be defined. The `token` field in the data object is optional and if left empty Kusion will generate the token, which is 54 characters in length by default. If the `token` is defined that value will always be used. + +### Opaque secrets + +Opaque secrets have no defined structure and can have arbitrary key value pairs. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "my-secret": sec.Secret { + type: "opaque" + } + } + } +} +``` + +### Certificate secrets + +Certificate secrets are useful for storing a certificate and its associated key. One common use for TLS Secrets is to configure encryption in transit for an Ingress, but you can also use it with other resources or directly in your workload. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "server-cert": sec.Secret { + type: "certificate" + data: { + # Please do not put private keys in configuration files + "tls.crt": "The cert file content" + "tls.key": "The key file content" + } + } + } + } +} +``` + +### External secrets + +As a general principle, storing secrets in a plain text configuration file is highly discouraged, keeping secrets outside of Git is especially important for future-proofing, even encrypted secrets are not recommended to check into Git. The most common approach is to store secrets in a third-party vault (such as Hashicorp Vault, AWS Secrets Manager and Azure Key Vault, etc) and retrieve the secret in the runtime only. External secrets are used to retrieve sensitive data from external secret store to make it easy to be consumed in containers. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "api-access-token": sec.Secret { + type: "external" + data: { + # Please do not put private keys in configuration files + "accessToken": "ref://api-auth-info/accessToken?version=1" + } + } + } + } +} +``` + +The value field in data object follow `ref://PATH[?version=]` URI syntax. 
`PATH` is the provider-specific path for the secret to be retried. Kusion provides out-of-the-box integration with `Hashicorp Vault`, `AWS Secrets Manager`, `Azure Key Vault` and `Alicloud Secrets Manager`. + +## Immutable secrets + +You can also declare a secret as immutable to prevent it from being changed accidentally. + +To declare a secret as immutable: + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +sampleapp: ac.AppConfiguration { + workload: service.Service { + # ... + secrets: { + "my-secret": sec.Secret { + # ... + immutable: True + } + } + } +} +``` + +You can change a secret from mutable to immutable but not the other way around. That is because the Kubelet will stop watching secrets that are immutable. As the name suggests, you can only delete and re-create immutable secrets but you cannot change them. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/8-monitoring.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/8-monitoring.md new file mode 100644 index 00000000..13e430f3 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/8-monitoring.md @@ -0,0 +1,102 @@ +# Application Monitoring + +You could also specify the collection of monitoring requirements for the application. That can be achieved via a `monitoring` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. + +As of version 0.11.0, Kusion supports integration with Prometheus by managing scraping behaviors in the configuration file. + +:::info + +For the monitoring configuration to work (more specifically, consumed by Prometheus), this requires the target cluster to have installed Prometheus correctly, either as a Kubernetes operator or a server/agent. + +More about how to set up Prometheus can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus) +::: + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `monitoring` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import monitoring as m +``` + +## Workspace configurations + +In addition to the KCL configuration file, there are also workspace-level configurations that should be set first. In an ideal scenario, this step is done by the platform engineers. + +In the event that they do not exist for you or your organization, e.g. if you are an individual developer, you can either do it yourself or use the [default values](#default-values) provided by the KusionStack team. The steps to do this yourself can be found in the [Prometheus User Guide for Kusion](../user-guides/observability/prometheus#setting-up-workspace-configs). + +:::info + +For more details on how workspaces work, please refer to the [workspace concept](../3-concepts/4-workspace.md) +::: + +By separating configurations that the developers are interested in and those that platform owners are interested in, we can reduce the cognitive complexity of the application configuration and achieve separation of concern. + +You can append the following YAML file to your own workspace configurations and update the corresponding workspace with command `kusion workspace update`. 
+ +```yaml +modules: + kusionstack/monitoring@v0.1.0: + default: + interval: 30s + monitorType: Pod + operatorMode: true + scheme: http + timeout: 15s +``` + +## Managing Scraping Configuration +To manage scrape configuration for the application: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/metrics" + port: "web" + } + } +} +``` + +The example above will instruct the Prometheus job to scrape metrics from the `/metrics` endpoint of the application on the port named `web`. + +To instruct Prometheus to scrape from `/actuator/metrics` on port `9099` instead: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + # Add the monitoring configuration backed by Prometheus + accessories: { + "monitoring": m.Prometheus { + path: "/actuator/metrics" + port: "9099" + } + } +} +``` + +Note that numbered ports only work when your Prometheus is not running as an operator. + +Neither `path` and `port` are required fields if Prometheus runs as an operator. If omitted, `path` defaults to `/metrics`, and `port` defaults to the container port or service port, depending on which resource is being monitored. If Prometheus does not run as an operator, both fields are required. + +Scraping scheme, interval and timeout are considered platform-managed configurations and are therefore managed as part of the [workspace configurations](../user-guides/observability/prometheus#setting-up-workspace-configs). + +More details about how the Prometheus integration works can be found in the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md). + +## Default values + +If no workspace configurations are found, the default values provided by the KusionStack team are: +- Scraping interval defaults to 30 seconds +- Scraping timeout defaults to 15 seconds +- Scraping scheme defaults to http +- Defaults to NOT running as an operator + +If any of the default values does not meet your need, you can change them by [setting up the workspace configuration](../user-guides/observability/prometheus#setting-up-workspace-configs). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/9-operational-rules.md b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/9-operational-rules.md new file mode 100644 index 00000000..674d2f2c --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/9-operational-rules.md @@ -0,0 +1,54 @@ +--- +id: operational-rules +--- + +# Operational Rules + +You could also specify the collection of operational rule requirements for the application. That can be achieved via a `opsrule` module (or bring-your-own-module) in the `accessories` field in `AppConfiguration` to achieve that. Operational rules are used as a preemptive measure to police and stop any unwanted changes. + +## Import + +In the examples below, we are using schemas defined in the `kam` package and the `opsrule` Kusion Module. For more details on KCL package and module import, please refer to the [Configuration File Overview](overview). 
+ +The `import` statements needed for the following walkthrough: +``` +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import opsrule as o +``` + +## Max Unavailable Replicas + +Currently, the `opsrule` module supports setting a `maxUnavailable` parameter, which specifies the maximum number of pods that can be rendered unavailable at any time. It can be either a fraction of the total pods for the current application or a fixed number. This operational rule is particularly helpful against unexpected changes or deletes to the workloads. It can also prevent too many workloads from going down during an application upgrade. + +More rules will be available in future versions of Kusion. + +To set `maxUnavailable` to a percentage of pods: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + containers: { + # ... + } + } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + +To set `maxUnavailable` to a fixed number of pods: +``` +myapp: ac.AppConfiguration { + workload: service.Service { + # ... + } + accessories: { + "opsRule": o.OpsRule { + maxUnavailable: 2 + } + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/_category_.json b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/_category_.json new file mode 100644 index 00000000..64d45678 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/4-configuration-walkthrough/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Configuration Walkthrough" +} diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/1-database.md b/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/1-database.md new file mode 100644 index 00000000..497c3ab4 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/1-database.md @@ -0,0 +1,305 @@ +--- +id: database +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deliver the WordPress Application with Cloud RDS + +This tutorial will demonstrate how to deploy a WordPress application with Kusion, which relies on both Kubernetes and IaaS resources provided by cloud vendors. We can learn how to declare the Relational Database Service (RDS) to provide a cloud-based database solution with Kusion for our application from this article. + +## Prerequisites + +- Install [Kusion](../../2-getting-started/1-install-kusion.md). +- Install [kubectl CLI](https://kubernetes.io/docs/tasks/tools/#kubectl) and run a [Kubernetes](https://kubernetes.io/) or [k3s](https://docs.k3s.io/quick-start) or [k3d](https://k3d.io/v5.4.4/#installation) or [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node) cluster. +- Prepare a cloud service account and create a user with at least **VPCFullAccess** and **RDSFullAccess** related permissions to use the Relational Database Service (RDS). This kind of user can be created and managed in the Identity and Access Management (IAM) console of the cloud vendor. +- The environment that executes `kusion` needs to have connectivity to terraform registry to download the terraform providers. 
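+
+Before moving on, it can be worth a quick sanity check that `kubectl` can reach the target cluster with the `KUBECONFIG` you prepared, for example:
+
+```shell
+# Show which cluster context kubectl is currently pointing at
+kubectl config current-context
+
+# Confirm the cluster is reachable and its nodes are Ready
+kubectl get nodes
+```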
+ +Additionally, we also need to configure the obtained AccessKey and SecretKey as well as the cloud resource region as environment variables for specific cloud provider: + + + + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +export AWS_REGION=us-east-1 # replace it with your region +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +```mdx-code-block + + +``` + +```bash +export ALICLOUD_ACCESS_KEY="LTAI5txxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="nxuowIxxx" # replace it with your SecretKey +export ALICLOUD_REGION=cn-hangzhou # replace it with your region +``` + +![alicloud iam account](/img/docs/user_docs/getting-started/set-rds-access.png) + +```mdx-code-block + + +``` + +## Init Workspace + +To deploy the WordPress application with cloud rds, we first need to initiate a `Workspace` for the targeted stack (here we are using `dev`). Please copy the following example YAML file to your local `workspace.yaml`. + + + + +`workspace.yaml` +```yaml +# MySQL configurations for AWS RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +`workspace.yaml` +```yaml +# MySQL configurations for Alicloud RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "wordpress-mysql" +``` + +```mdx-code-block + + +``` + +If you would like to try creating the `Alicloud` RDS instance, you should replace the `[your-subnet-id]` of `modules.kusionstack/mysql@0.1.0.default.subnetID` field with the Alicloud `vSwitchID` to which the database will be provisioned in. After that, you can execute the following command line to initiate the configuration for `dev` workspace. + +```shell +kusion workspace create dev -f workspace.yaml +``` + +Since Kusion by default use the `default` workspace, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you have already created and used the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + +The `workspace.yaml` is a sample configuration file for workspace management, including `MySQL` module configs. Workspace configurations are usually declared by **Platform Engineers** and will take effect through the corresponding stack. + +:::info +More details about the configuration of Workspace can be found in [Concepts of Workspace](../../3-concepts/4-workspace.md). +::: + +## Create Project And Stack + +We can create a new project named `wordpress-rds-cloud` with the `kusion project create` command. + +```shell +# Create a new directory and navigate into it. +mkdir wordpress-rds-cloud && cd wordpress-rds-cloud + +# Create a new project with the name of the current directory. 
+kusion project create +``` + +After creating the new project, we can create a new stack named `dev` with the `kusion stack create` command. + +```shell +# Create a new stack with the specified name under current project directory. +kusion stack create dev +``` + +The created project and stack structure looks like below: + +```shell +tree +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +### Update And Review Configuration Codes + +The configuration codes in the created stack are basically empty, thus we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +```shell +# dev/kcl.mod +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +``` + +```python +# dev/main.k +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +import mysql + +# main.k declares customized configurations for dev stacks. +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + wordpress: c.Container { + image: "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST_WORDPRESS_MYSQL)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME_WORDPRESS_MYSQL)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD_WORDPRESS_MYSQL)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + "mysql": mysql.MySQL { + type: "cloud" + version: "8.0" + } + } +} +``` + +## Application Delivery + +You can complete the delivery of the WordPress application in the folder of `wordpress-cloud-rds/dev` using the following command line. Kusion will enable the watching of the application resource creation and automatic port-forwarding of the specified port (80) from local to the Kubernetes Service. + +```shell +cd dev && kusion apply --watch +``` + +:::info +During the first apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + + + + +![apply the wordpress application with aws rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![apply the wordpress application with alicloud rds](/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + +``` + +After all the resources reconciled, we can port-forward our local port (e.g. 12345) to the WordPress frontend service port (80) in the cluster: + +```shell +kubectl port-forward -n wordpress-cloud-rds svc/wordpress-cloud-rds-dev-wordpress-private 12345:80 +``` + +![kubectl port-forward for wordpress](/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png) + +## Verify WordPress Application + +Next, we will verify the WordPress site service we just delivered, along with the creation of the RDS instance it depends on. We can start using the WordPress site by accessing the link of local-forwarded port [(http://localhost:12345)](http://localhost:12345) we just configured in the browser. 
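+
+If you prefer the command line, a quick way to confirm that the forwarded port is serving the WordPress front end might look like this (assuming `curl` is available locally; a fresh installation typically answers with a redirect to the setup page):
+
+```shell
+curl -I http://localhost:12345
+```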
+ +![wordpress site page](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +In addition, we can also log in to the cloud service console page to view the RDS instance we just created. + + + + +![aws rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png) + +```mdx-code-block + + +``` + +![alicloud rds instance](/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png) + +```mdx-code-block + + +``` + +## Delete WordPress Application + +You can delete the WordPress application and related RDS resources using the following command line. + +```shell +kusion destroy --yes +``` + + + + +![kusion destroy wordpress with aws rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png) + +```mdx-code-block + + +``` + +![kusion destroy wordpress with alicloud rds](/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png) + +```mdx-code-block + + diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/2-expose-service.md b/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/2-expose-service.md new file mode 100644 index 00000000..b3e78d73 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/2-expose-service.md @@ -0,0 +1,259 @@ +--- +id: expose-service +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Expose Application Service Deployed on CSP Kubernetes + +Deploying applications on the Kubernetes provided by CSP (Cloud Service Provider) is convenient and reliable, which is adopted by many enterprises. Kusion has a good integration with CSP Kubernetes service. You can deploy your application to the Kubernetes cluster, and expose the service in a quite easy way. + +This tutorial demonstrates how to expose service of the application deployed on CSP Kubernetes. And the responsibilities of platform engineers and application developers are also clearly defined. + +## Prerequisites + +Create a Kubernetes cluster provided by CSP, and complete the corresponding configurations for `KUBECONFIG`. The following CSP Kubernetes services are supported: + +- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks) +- [Alibaba Cloud Container Service for Kubernetes (ACK)](https://www.alibabacloud.com/product/kubernetes) + +## Expose Service Publicly + +If you want the application to be accessed from outside the cluster, you should expose the service publicly. Follow the steps below, you will simply hit the goal. + +### Set up Workspace + +Create the workspace as the target where the application will be deployed to. The workspace is usually set up by **Platform Engineers**, which contains platform-standard and application-agnostic configurations. The workspace configurations are organized through a YAML file. + + + + +```yaml +modules: + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + port: + type: aws +``` + +```mdx-code-block + + +``` + +```yaml +modules: + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + port: + type: alicloud + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +```mdx-code-block + + +``` + +The YAML shown above gives an example of the workspace configuration to expose service on **EKS** and **ACK**. 
The block `port` contains the workspace configuration of Kusion module `network`, which has the following fields: + +- type: the CSP providing Kubernetes service, support `alicloud` and `aws` +- annotations: annotations attached to the service, should be a map +- labels: labels attached to the service, should be a map + +Then, create the workspace with the configuration file. The following command creates a workspace named `dev` with configuration file `workspace.yaml`. + +```bash +kusion workspace create dev -f workspace.yaml +``` + +After that, we can switch to the `dev` workspace with the following cmd: + +```shell +kusion workspace switch dev +``` + +If you already create and use the configuration of `dev` workspace, you can append the MySQL module configs to your workspace YAML file and use the following command line to update the workspace configuration. + +```shell +kusion workspace update dev -f workspace.yaml +``` + +We can use the following command lines to show the current workspace configurations for `dev` workspace. + +```shell +kusion workspace show +``` + + +### Init Project + +After creating workspace, you should write application configuration code, which only contains simple and application-centric configurations. This step is usually accomplished by application developers. + +We can start by initializing this tutorial project with `kusion init` cmd: + +```shell +# Create a new directory and navigate into it. +mkdir nginx && cd nginx + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The created project structure looks like below: + +```shell +tree +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +:::info +More details about the directory structure can be found in [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md). +::: + +### Update And Review Configuration Codes + +The initiated configuration codes are for the demo quickstart application, we should replace the `dev/kcl.mod` and `dev/main.k` with the below codes: + +`dev/kcl.mod` +```shell +[package] +name = "nginx" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +`dev/main.k` +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: service.Service { + containers: { + nginx: c.Container { + image: "nginx:1.25.2" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: True + } + ] + } + } +} +``` + +The code shown above describes how to expose service publicly. Kusion use schema `Port` to describe the network configuration, the primary fields of Port are as follows: + +- port: port number to expose service +- protocol: protocol to expose service, support `TCP` and `UDP` +- public: whether to public the service + +To public the service, you should set `public` as True. Besides, schema `Service` should be used to describe the workload configuration. + +That's all what an application developer needs to configure! 
Next, preview and apply the configuration, the application will get deployed and exposed publicly. + +:::info +Kusion uses Load Balancer (LB) provided by the CSP to expose service publicly. For more detailed network configuration, please refer to [Application Networking](https://www.kusionstack.io/docs/kusion/configuration-walkthrough/networking) +::: + +:::info +During the first preview and apply, the models and modules as well as the Terraform CLI (if not exists) that the application depends on will be downloaded, so it may take some time (usually within two minutes). You can take a break and have a cup of coffee. +::: + +### Preview and Apply + +Execute `kusion preview` under the stack path, you will get what will be created in the real infrastructure. The picture below gives the preview result of the example. A Namespace, Service and Deployment will be created, which meets the expectation. The service name has a suffix `public`, which shows it can be accessed publicly. + +![preview-public](/img/docs/user_docs/cloud-resources/expose-service/preview-public.png) + +Then, execute `kusion apply --yes` to do the real deploying job. Just a command and a few minutes, you have accomplished deploying application and expose it publicly. + +![apply-public](/img/docs/user_docs/cloud-resources/expose-service/apply-public.png) + +### Verify Accessibility + +In the example above, the kubernetes Namespace whose name is nginx, and a `Service` and `Deployment` under the Namespace should be created. Use `kubectl get` to check, the Service whose type is `LoadBalancer` and Deployment are created indeed. And the Service has `EXTERNAL-IP` 106.5.190.109, which means it can be accessed from outside the cluster. + +![k8s-resource-public](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png) + +Visit the `EXTERNAL-IP` via browser, the correct result is returned, which illustrates the servie getting publicly exposed successfully. + +![result-public](/img/docs/user_docs/cloud-resources/expose-service/result-public.png) + +## Expose Service Inside Cluster + +If you only need the application to be accessed inside the cluster, just configure `Public` as `False` in schema `Port`. There is no need to change the workspace, which means an application developer can easily change a service exposure range, without the involvement of platform engineers. + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +# main.k declares customized configurations for dev stacks. +nginx: ac.AppConfiguration { + workload: service.Service { + ... + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + protocol: "TCP" + public: False + } + ] + } + } +} +``` + +Execute `kusion apply --yes`, the generated Service has suffix `private`. + +![apply-private](/img/docs/user_docs/cloud-resources/expose-service/apply-private.png) + +And the Service type is `ClusterIP`, only has `CLUSTER_IP` and no `EXTERNAL_IP`, which means it cannot get accessed from outside the cluster. + +![k8s-resource-private](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png) + +## Summary +This tutorial demonstrates how to expose service of the application deployed on the CSP Kubernetes. By platform engineers' setup of workspace, and application developers' configuration of schema `Port` of Kusion module `network`, Kusion enables you expose service simply and efficiently. 
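+
+When you are done experimenting, the exposed service and its workload can be cleaned up from the stack directory in the same way as in the other guides:
+
+```shell
+kusion destroy --yes
+```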
diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/_category_.json b/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/_category_.json new file mode 100644 index 00000000..f6f2c380 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/1-cloud-resources/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Cloud Resources" +} diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/1-deploy-application.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/1-deploy-application.md new file mode 100644 index 00000000..9793e2e7 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/1-deploy-application.md @@ -0,0 +1,282 @@ +--- +id: deploy-application +--- + +# Deploy Application + +This guide shows you how to use Kusion CLIs to complete the deployment of an application running in Kubernetes. +We call the abstraction of application operation and maintenance configuration as `AppConfiguration`, and its instance as `Application`. +It is essentially a configuration model that describes an application. The complete definition can be seen [here](../../reference/modules/developer-schemas/app-configuration). + +In production, the application generally includes minimally several k8s resources: + +- Namespace +- Deployment +- Service + +:::tip +This guide requires you to have a basic understanding of Kubernetes. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- [Service](https://kubernetes.io/docs/concepts/services-networking/service/) +::: + +## Prerequisites + +Before we start, we need to complete the following steps: + +1、Install Kusion + +We recommend using HomeBrew(Mac), Scoop(Windows), or an installation shell script to download and install Kusion. +See [Download and Install](../../getting-started/install-kusion) for more details. + +2、Running Kubernetes cluster + +There must be a running and accessible Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. +If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. + +## Initializing + +This guide is to deploy an app using Kusion, relying on the Kusion CLI and an existing Kubernetes cluster. + +### Initializing workspace configuration + +In version 0.10.0, we have introduced the new concept of [workspaces](../../concepts/workspace), which is a logical layer whose configurations represent an opinionated set of defaults, often appointed by the platform team. In most cases workspaces are represented with an "environment" in traditional SDLC terms. These workspaces provide a means to separate the concerns between the **application developers** who wish to focus on business logic, and a group of **platform engineers** who wish to standardize the applications on the platform. + +Driven by the discipline of Platform Engineering, management of the workspaces, including create/updating/deleting workspaces and their configurations should be done by dedicated platform engineers in a large software organizations to facilitate a more mature and scalable collaboration pattern. 
+ +:::tip +More on the collaboration pattern can be found in the [design doc](https://github.com/KusionStack/kusion/blob/main/docs/design/collaboration/collaboration_paradigm.md). +::: + +However, if that does NOT apply to your scenario, e.g. if you work in a smaller org without platform engineers or if you are an individual developer, we wish Kusion can still be a value tool to have when delivering an application. In this guide, we are NOT distinctively highlighting the different roles or what the best practices entails (the design doc above has all that) but rather the steps needed to get Kusion tool to work. + +As of version 0.11.0, workspace configurations in Kusion can not only be managed on the local filesystem in the form of YAML files, but the remotely-managed workspaces have been supported as well. + +To initialize the workspace configuration: + +```bash +~/playground$ touch ~/dev.yaml +~/playground$ kusion workspace create dev -f ~/dev.yaml +create workspace dev successfully +``` + +To verify the workspace has been created properly: + +``` +~/playground$ kusion workspace list +- default +- dev +~/playground$ kusion workspace show dev +{} +``` + +Note that `show` command tells us the workspace configuration is currently empty, which is expected because we created the `dev` workspace with an empty YAML file. An empty workspace configuration will suffice in some cases, where no platform configurations are needed. + +Kusion by default uses the `default` workspace, thus we need to switch to the `dev` workspace we have just created. + +```bash +~/playground$ kusion workspace switch dev +``` + +We will progressively add more workspace configurations throughout this user guide. + +### Initializing application configuration + +Now that workspaces are properly initialized, we can begin by initializing the application configuration: + +```bash +# Create a new directory and navigate into it. +mkdir simple-service && cd simple-service + +# Initialize the demo project with the name of the current directory. +kusion init +``` + +The directory structure is as follows: + +```shell +simple-service/ +. +├── dev +│   ├── kcl.mod +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 4 files +``` + +The project directory has the following files that are automatically generated: +- `project.yaml` represents project-level configurations. +- `dev` directory stores the customized stack configuration: + - `dev/main.k` stores configurations in the `dev` stack. + - `dev/stack.yaml` stores stack-level configurations. + - `dev/kcl.mod` stores stack-level dependencies. + +In general, the `.k` files are the KCL source code that represents the application configuration, and the `.yaml` is the static configuration file that describes behavior at the project or stack level. + +:::info +See [Project](../../concepts/project/overview) and [Stack](../../concepts/stack/overview) for more details about Project and Stack. +::: + +The `kusion init` command will create a demo quickstart application, we may update the `dev/kcl.mod` and `dev/main.k` later. + +#### kcl.mod +There should be a `kcl.mod` file generated automatically under the project directory. The `kcl.mod` file describes the dependency for the current project or stack. By default, it should contain a reference to the official [`kam` repository](https://github.com/KusionStack/kam) which holds the Kusion `AppConfiguration` and related workload model definitions that fits best practices. 
You can also create your own models library and reference that. + +You can change the package name in `kcl.mod` to `simple-service`: + +`dev/kcl.mod` +```shell +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +#### main.k +The configuration file `main.k`, usually written by the application developers, declare customized configurations for a specific stack, including an `Application` instance of `AppConfiguration` model. + +You can update the `main.k` as follows: + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +helloworld: ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "kusionstack/kusion-quickstart:latest" + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +## Previewing + +At this point, the project has been completely initialized. +The configuration is written in KCL, not JSON/YAML which Kubernetes recognizes, so it needs to be built to get the final output. And we can use the `kusion preview` cmd to preview the Kubernetes resources intended to deliver. + +Enter stack dir `simple-service/dev` and preview: + +```bash +cd simple-service/dev && kusion preview +``` + +:::tip +For instructions on the kusion command line tool, execute `kusion -h`, or refer to the tool's online [documentation](../../reference/commands). +::: + +## Applying + +Preview is now completed. We can apply the configuration as the next step. In the output from `kusion preview`, you can see 3 resources: + +- a Namespace named `simple-service` +- a Deployment named `simple-service-dev-helloworld` in the `simple-service` namespace +- a Service named `simple-service-dev-helloworld-private` in the `simple-service` namespace + +Execute command: + +```bash +kusion apply +``` + +The output is similar to: + +``` + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Create +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Create apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 3 created, 0 updated, 0 deleted. +``` + +After the configuration applying successfully, you can use the `kubectl` to check the actual status of these resources. + +1、 Check Namespace + +```bash +kubectl get ns +``` + +The output is similar to: + +``` +NAME STATUS AGE +default Active 117d +simple-service Active 38s +kube-system Active 117d +... 
+``` + +2、Check Deployment + +```bash +kubectl get deploy -n simple-service +``` + +The output is similar to: + +``` +NAME READY UP-TO-DATE AVAILABLE AGE +simple-service-dev-helloworld 1/1 1 1 59s +``` + +3、Check Service + +```bash +kubectl get svc -n simple-service +``` + +The output is similar to: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +simple-service-dev-helloworld-private ClusterIP 10.98.89.104 80/TCP 79s +``` + +4、Validate app + +Using the `kubectl` tool, forward native port `30000` to the service port `80`. + +```bash +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:80 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000): + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/10-customize-health-policy.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/10-customize-health-policy.md new file mode 100644 index 00000000..8261953e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/10-customize-health-policy.md @@ -0,0 +1,194 @@ +--- +id: health-policy +--- + +# Customized Health Check with KCL + +Kusion now offers advanced customized health checks leveraging the power of `KCL`. This robust feature empowers users to define complex and tailored health policies for their Kubernetes resources. By implementing these custom policies, you can ensure that your resources not only meet specific criteria but also satisfy complex conditions before being deemed healthy. This capability is particularly valuable when assessing the health status of Kubernetes Custom Resources (CRs), providing a flexible and precise mechanism to validate the state of your entire `project`. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Defining a Health Policy + +You can define a health policy in the `Workspace` configuration via the `healthPolicy` field. The `healthPolicy` should contain a KCL script that defines the health check logic and can be used to assert healthy conditions on your `Kubernetes` resources. + +Firstly, you need to initialize the workspace configuration: + +```shell +~/$ touch ~/dev.yaml +~/$ kusion workspace create dev -f ~/dev.yaml +create workspace dev successfully +``` + +### Example Health Policy + +Here is an example of how to define a health policy for a Kubernetes Deployment. This policy checks multiple aspects of the Deployment's health status. Update ~/dev.yaml with this example: + +```yaml +modules: + service: + configs: + default: + healthPolicy: + health.kcl: | + assert res.metadata.generation == res.status.observedGeneration + assert res.status.replicas == 1 +``` + +In this example, the custom health check ensures that: + +1. The Deployment has 1 replicas +2. The observed generation matches the current generation (indicating that the latest changes have been processed) + +:::note +`res` represents the Kubernetes resource being evaluated. It's a fixed expression in this feature that provides access to all fields and properties of the resource. 
You can use dot notation (e.g., `res.metadata.name`) to access nested fields within the resource. This allows you to create complex health checks based on various aspects of your Kubernetes resources. +::: + +## How It Works + +When you apply your configuration, `kusion` will patch the provided `KCL` script into the `extension` field of the specified resource in the `Spec` and use the provided KCL script to evaluate the health of this resource. The health check will be performed repeatedly until it passes or a timeout is reached. + +The KCL script has access to the full Kubernetes resource object through the `res` variable. You can use any fields or properties of the resource in your health check logic. + +Besides configuring the workspace, platform engineers can also utilize the useful `PatchHealthPolicyToExtension` function in SDK to perform this feature while constructing the `module`. This function allows for a more programmatic and flexible approach to applying health policies while it's multiple resources case for a module. + +Here's a code snippet of how to use the `PatchHealthPolicyToExtension` function: + +```golang +// Generate Kusion resource ID and wrap the Kubernetes Service into Kusion resource +// with the SDK provided by kusion module framework. +resourceID := module.KubernetesResourceID(svc.TypeMeta, svc.ObjectMeta) +resource, err := module.WrapK8sResourceToKusionResource(resourceID, svc) +if err != nil { + return nil, err +} +module.PatchHealthPolicyToExtension(resource, "assert res.metadata.generation == res.status.observedGeneration") +``` + +## Applying the Health Policy + +To apply the health policy, update your workspace configuration: + +```shell +~/$ kusion workspace update dev -f ~/dev.yaml +update workspace dev successfully +``` + +After updating the workspace configuration, apply your new configuration with the customized health check with the following commands: + +```shell +~/$ cd quickstart/default +~/quickstart/default/$ kusion apply + ✔︎ Generating Spec in the Stack default... + +Stack: default +ID Action +v1:Namespace:quickstart Create +v1:Service:quickstart:quickstart-default-quickstart-private Create +apps/v1:Deployment:quickstart:quickstart-default-quickstart Create + + +Do you want to apply these diffs?: + > yes + +Start applying diffs ... + ✔︎ Succeeded v1:Namespace:quickstart + ⣽ Creating v1:Service:quickstart:quickstart-default-quickstart-private (0s) + ✔︎ Succeeded v1:Namespace:quickstart + ⢿ Creating v1:Service:quickstart:quickstart-default-quickstart-private (0s) + ⢿ Creating apps/v1:Deployment:quickstart:quickstart-default-quickstart (0s) + ...... + ✔︎ Succeeded v1:Namespace:quickstart + ✔︎ Succeeded v1:Service:quickstart:quickstart-default-quickstart-private + ✔︎ Succeeded apps/v1:Deployment:quickstart:quickstart-default-quickstart +Apply complete! Resources: 3 created, 0 updated, 0 deleted. 
+ +[v1:Namespace:quickstart] +Type Kind Name Detail +READY Namespace quickstart Phase: Active +READY ServiceAccount default Secrets: 0, Age: 0s +[v1:Service:quickstart:quickstart-default-quickstart-private] +Type Kind Name Detail +READY Service quickstart-default-quickstart-private Type: ClusterIP, InternalIP: 10.96.196.38, ExternalIP: , Port(s): 8080/TCP +READY EndpointSlice quickstart-default-quickstart-private-v42zc AddressType: IPv4, Ports: 8080, Endpoints: 10.244.1.99 +[apps/v1:Deployment:quickstart:quickstart-default-quickstart] +Type Kind Name Detail +READY Deployment quickstart-default-quickstart Reconciled +READY ReplicaSet quickstart-default-quickstart-67459cd68d Desired: 1, Current: 1, Ready: 1 +READY Pod quickstart-default-quickstart-67459cd68d-jqtt7 Ready: 1/1, Status: Running, Restart: 0, Age: 4s +``` + +## Explanation + +The `Detail` column for the Deployment `quickstart-default-quickstart` provides crucial information about the resource's reconciliation status: + +- If it shows "Reconciled", it means the resource has successfully met the conditions defined in the health policy. + +```shell +Type Kind Name Detail +READY Deployment quickstart-default-quickstart Reconciled +``` + +- If it displays "Reconciling...", it indicates that the resource is still in the process of reaching the desired state as per the health policy. + +```shell +Type Kind Name Detail +MODIFIED Deployment quickstart-default-quickstart Reconciling... +``` + +- In case of any errors or unsupported configurations, appropriate messages will be shown, and customized health check will be skipped. + +```shell +Type Kind Name Detail +READY Deployment quickstart-default-quickstart health policy err: invalid syntax error, skip +``` + +This `Detail` helps you quickly assess whether your Kubernetes resources have reached their intended state after applying changes. It's an essential feedback mechanism for ensuring the reliability and correctness of your deployments. + +:::note +`Detail` showing as `Unsupported kind, skip` indicates that the health policy is not configured for this resource's health check. This can occur due to several reasons: + +1. There's a mismatch between the resource kind in your Kubernetes manifests and the kinds specified in your health policy. +2. The health policy in your workspace configuration might be missing or incorrectly defined for this particular resource. +3. You might forgot to updated your workspace with the new configuration. + +To resolve this: + +1. Review your workspace configuration to ensure the health policy is correctly defined for all intended resource kinds. +2. Check that the resource kind in your Kubernetes manifests matches the kinds specified in your health policy. + +If the issue persists, you may need to update your workspace configuration or adjust your health policy to include the specific resource kind. +::: + +## Best Practices + +- Keep your health check logic simple and focused on key indicators of health for your specific resource. +- Use assertions to clearly define what conditions must be true for the resource to be considered healthy. +- Consider both the desired state (e.g., number of replicas) and the current state (e.g., available replicas) in your health checks. +- For complex resources, you may want to check multiple conditions to ensure full health and readiness. + +## Limitations + +- The customized health check feature is currently only available for Kubernetes resources. 
+- The KCL script must complete execution within a reasonable time to avoid timeouts during the apply process. +- Errors in the KCL script syntax will cause the health check to be skipped, so be sure to test your scripts thoroughly. + +## Validation + +To verify the health policy, you can check the status of your Kubernetes resources: + +```bash +kubectl get -n quickstart deployment quickstart-default-quickstart -o yaml +``` + +Ensure that the resource meets the conditions defined in your health policy. + +## Conclusion + +Customized health checks provides a powerful way to ensure your Kubernetes resources are in the desired state before considering an `apply` operation complete. By defining health policies, you can automate the validation of your resources and ensure they meet specific criteria before being considered healthy. By leveraging KCL, you can create sophisticated health check logic tailored to your specific `project` needs. + +For more details on KCL and its syntax, refer to the [KCL documentation](../../4-configuration-walkthrough/2-kcl-basics.md). diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/2-container.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/2-container.md new file mode 100644 index 00000000..eb51ec5e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/2-container.md @@ -0,0 +1,146 @@ +--- +id: container +--- + +# Configure Containers + +You can manage container-level configurations in the `AppConfiguration` model via the `containers` field (under the `workload` schema). By default, everything defined in the `containers` field will be treated as application containers. Sidecar containers will be supported in a future version of kusion. + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the last guide, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + service: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` + +Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/service). 
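+
+If you want to double-check that the defaults took effect before moving on, you can print the workspace configuration again. This is an optional sanity check; the exact output format may vary slightly between Kusion versions, but it should echo the module defaults you just supplied:
+
+```bash
+# Show the current configuration of the dev workspace.
+kusion workspace show dev
+```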
+ +## Example + +`simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 80 + } + ] + } + } +} +``` + +## Apply + +Re-run steps in [Applying](deploy-application#applying), new container configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the container (in the deployment template) now has the updated attributes as defined in the container configuration: +``` +$ kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + - name: env1 + value: VALUE + - name: env2 + value: VALUE2 + image: gcr.io/google-samples/gb-frontend:v4 + imagePullPolicy: IfNotPresent + name: helloworld + readinessProbe: + failureThreshold: 3 + httpGet: + host: localhost + path: / + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 500m + memory: 512M +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/3-service.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/3-service.md new file mode 100644 index 00000000..f795430c --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/3-service.md @@ -0,0 +1,139 @@ +--- +id: service +--- + +# Expose Service + +You can determine how to expose your service in the `AppConfiguration` model via the `ports` field (under the `network` accessory). The `ports` field defines a list of all the `Port`s you want to expose for the application (and their corresponding listening ports on the container, if they don't match the service ports), so that it can be consumed by other applications. + +Unless explicitly defined, each of the ports exposed is by default exposed privately as a `ClusterIP` type service. You can expose a port publicly by specifying the `public` field in the `Port` schema. At the moment, the implementation for publicly access is done via Load Balancer type service backed by cloud providers. 
Ingress will be supported in a future version of kusion. + +For the `Port` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-port) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the services to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`: + +```yaml +modules: + kusionstack/network@0.1.0: + default: + port: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +For a full reference of what can be configured in the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/networking/network). + +## Example + +`simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n + +"helloworld": ac.AppConfiguration { + workload: service.Service { + containers: { + "helloworld": c.Container { + image = "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + } + accessories: { + "network": n.Network { + ports: [ + n.Port { + port: 8080 + targetPort: 80 + } + ] + } + } +} +``` + +The code above changes the service port to expose from `80` in the last guide to `8080`, but still targeting the container port `80` because that's what the application is listening on. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), new service configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private Update +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld UnChanged + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Update v1:Service:simple-service:simple-service-dev-helloworld-private success + SUCCESS UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip +UnChanged apps/v1:Deployment:simple-service:simple-service-dev-helloworld, skip [3/3] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! 
Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the Kubernetes service now has the updated attributes (mapping service port 8080 to container port 80) as defined in the `ports` configuration: + +``` +kubectl get svc -n simple-service -o yaml +... + spec: + ... + ports: + - name: simple-service-dev-helloworld-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 80 +... +``` + +Exposing service port 8080: +``` +kubectl port-forward svc/simple-service-dev-helloworld-private -n simple-service 30000:8080 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000), the application should be up and running: + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/4-image-upgrade.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/4-image-upgrade.md new file mode 100644 index 00000000..ccee54e0 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/4-image-upgrade.md @@ -0,0 +1,78 @@ +--- +id: image-upgrade +--- + +# Upgrade Image + +You can declare the application's container image via `image` field of the `Container` schema. + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Pre-requisite + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the image value in `simple-service/dev/main.k`: +```python +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.nginx: { + ... + # before: + # image = "gcr.io/google-samples/gb-frontend:v4" + # after: + image = "gcr.io/google-samples/gb-frontend:v5" + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), update image is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. 
+``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated image (v5) as defined in the container configuration: +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/5-resource-spec.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/5-resource-spec.md new file mode 100644 index 00000000..1e5f208a --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/5-resource-spec.md @@ -0,0 +1,90 @@ +--- +id: resource-spec +--- + +# Configure Resource Specification + +You can manage container-level resource specification in the `AppConfiguration` model via the `resources` field (under the `Container` schema). + +For the full `Container` schema reference, please see [here](../../reference/modules/developer-schemas/workload/service#schema-container) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +## Example + +Update the resources value in `simple-service/dev/main.k`: + +```py +import kam.v1.app_configuration as ac + +helloworld: ac.AppConfiguration { + workload.containers.helloworld: { + ... + # before: + # resources: { + # "cpu": "500m" + # "memory": "512M" + # } + # after: + resources: { + "cpu": "250m" + "memory": "256Mi" + } + ... + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ v1:Service:simple-service:simple-service-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS UnChanged v1:Service:simple-service:simple-service-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success +Update apps/v1:Deployment:simple-service:simple-service-dev-helloworld success [3/3] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the application container (in the deployment template) now has the updated resources attributes (cpu:250m, memory:256Mi) as defined in the container configuration: + +``` +kubectl get deployment -n simple-service -o yaml +... + template: + ... 
+ spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... + resources: + limits: + cpu: 250m + memory: 256Mi +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md new file mode 100644 index 00000000..915f84c4 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/6-set-up-operational-rules.md @@ -0,0 +1,86 @@ +--- +id: set-up-operational-rules +--- + +# Set up Operational Rules + +You can set up operational rules in the `AppConfiguration` model with the `opsrule` accessory and corresponding platform configurations in the workspace directory. The `opsrule` is the collection of operational rule requirements for the application that are used as a preemptive measure to police and stop any unwanted changes. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` command, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. + +However, if you (or the platform team) would like to set default values for the opsrule to standardize the behavior of applications, you can do so by updating the `~/dev.yaml`. +Note that the platform engineers should set the default workload to [Kusion Operation CollaSet](https://github.com/KusionStack/operating) and installed the Kusion Operation controllers properly, the `opsrules` module will generate a [PodTransitionRule](https://www.kusionstack.io/docs/operating/manuals/podtransitionrule) instead of updating the `maxUnavailable` value in the deployment: + +```yaml +modules: + service: + default: + type: CollaSet + kusionstack/opsrule@0.1.0: + default: + maxUnavailable: 30% +``` + +Please note that the `maxUnavailable` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration. + +The workspace configuration need to be updated with the command: + +```bash +kusion workspace update dev -f ~/dev.yaml +``` + +## Example + +Add the `opsrule` module dependency to `kcl.mod`: + +```shell +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +opsrule = { oci = "oci://ghcr.io/kusionstack/opsrule", tag = "0.1.0" } + +[profile] +entries = ["main.k"] +``` + +Add the `opsrule` snippet to the `AppConfiguration` in `simple-service/dev/main.k`: + +```py +import kam.v1.app_configuration as ac +import service +import service.container as c +import opsrule + +helloworld: ac.AppConfiguration { + workload: service.Service { + ... 
+    }
+    # Configure the maxUnavailable rule
+    accessories: {
+        "opsrule": opsrule.OpsRule {
+            "maxUnavailable": "50%"
+        }
+    }
+}
+```
+
+## Applying
+
+Re-run the steps in [Applying](deploy-application#applying) to roll out the new operational rule.
+
+## Validation
+
+We can verify that the application's deployment strategy now has the updated attribute `maxUnavailable: 50%` as defined in the configuration.
diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/7-job.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/7-job.md
new file mode 100644
index 00000000..29a4466d
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/7-job.md
@@ -0,0 +1,146 @@
+---
+id: job
+---
+
+# Schedule a Job
+
+The guides above provide examples of how to configure workloads of the type `service.Service`, which is typically used for long-running web applications that should **never** go down. Alternatively, you can also schedule another kind of workload profile, namely `job.Job`, which corresponds to a one-off or recurring execution of tasks that run to completion and then stop.
+
+## Prerequisites
+
+Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application.
+
+The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create` and `kusion init` commands, which will create a workspace and also generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory.
+
+## Managing Workspace Configuration
+
+In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide; no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either.
+
+However, if you (or the platform team) would like to set default values for the workloads to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `~/dev.yaml`:
+
+```yaml
+modules:
+  service:
+    default:
+      replicas: 3
+      labels:
+        label-key: label-value
+      annotations:
+        annotation-key: annotation-value
+```
+
+Please note that the `replicas` in the workspace configuration only works as a default value and will be overridden by the value set in the application configuration.
+
+The workspace configuration needs to be updated with the command:
+
+```bash
+kusion workspace update dev -f ~/dev.yaml
+```
+
+For a full reference of what can be configured at the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/workload/job).
+ +## Example + +To schedule a job with cron expression, update `simple-service/dev/kcl.mod` and `simple-service/dev/main.k` to the following: + +`simple-service/dev/kcl.mod`: +```py +[package] +name = "simple-service" +version = "0.1.0" + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +job = { oci = "oci://ghcr.io/kusionstack/job", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } + +[profile] +entries = ["main.k"] +``` + +`simple-service/dev/main.k`: +```py +import kam.v1.app_configuration as ac +import job +import job.container as c + +helloworld: ac.AppConfiguration { + workload: job.Job { + containers: { + "busybox": c.Container { + # The target image + image: "busybox:1.28" + # Run the following command as defined + command: ["/bin/sh", "-c", "echo hello"] + } + } + # Run every minute. + schedule: "* * * * *" + } +} +``` + +The KCL snippet above schedules a job. Alternatively, if you want a one-time job without cron, simply remove the `schedule` from the configuration. + +You can find the full example in here in the [konfig repo](https://github.com/KusionStack/konfig/tree/main/example/simple-job). + +## Applying + +Re-run steps in [Applying](deploy-application#applying) and schedule the job. Your output might look like one of the following: + +If you are starting from scratch, all resources are created on the spot: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service Create +* └─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:simple-service success + SUCCESS Create batch/v1:CronJob:simple-service:helloworld-dev-helloworld success +Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success [2/2] ██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 2 created, 0 updated, 0 deleted. +``` + +If you are starting from the last guide which configures an `opsrule`, the output looks like the following which destroys the `Deployment` and `Service` and replace it with a `CronJob`: + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:simple-service UnChanged +* ├─ batch/v1:CronJob:simple-service:simple-service-dev-helloworld Create +* ├─ apps/v1:Deployment:simple-service:simple-service-dev-helloworld Delete +* └─ v1:Service:simple-service:simple-service-dev-helloworld-private Delete + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:simple-service, skip + SUCCESS Delete apps/v1:Deployment:simple-service:simple-service-dev-helloworld success + SUCCESS Create batch/v1:CronJob:simple-service:simple-service-dev-helloworld success + SUCCESS Delete v1:Service:simple-service:simple-service-dev-helloworld-private success +Delete v1:Service:simple-service:simple-service-dev-helloworld-private success [4/4] ███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 1 created, 0 updated, 2 deleted. 
+``` + +## Validation + +We can verify the job has now been scheduled: + +```shell +$ kubectl get cronjob -n simple-service +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +simple-service-dev-helloworld * * * * * False 0 2m18s +``` + +Verify the job has been triggered after the minute mark since we scheduled it to run every minute: +```shell +$ kubectl get job -n simple-service +NAME COMPLETIONS DURATION AGE +simple-service-dev-helloworld-28415748 1/1 5s 11s +``` diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/9-k8s-manifest.md b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/9-k8s-manifest.md new file mode 100644 index 00000000..b706c71a --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/9-k8s-manifest.md @@ -0,0 +1,208 @@ +--- +id: k8s-manifest +--- + +# Apply the Raw K8s Manifest YAML + +The guides above provide examples on how to configure workloads and accessories with KCL, and generate the related Kubernetes resources with Kusion Module generators, which is the usage method we recommend, as it can achieve the separation of concerns between developers and platform engineers, reducing the cognitive burden on developers. + +However, in some specific scenario, users may also have the need to directly use Kusion to apply and manage the raw Kubernetes manifest YAML files, such as taking over some existing resources and deploying CRD (CustomResourceDefinition), or other special resources. + +To help users directly apply raw K8s manifests, the KusionStack community has provided the [k8s_manifest](../../6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md) Kusion Module. + +:::info +The module definition and implementation, as well as the example can be found at [here](https://github.com/KusionStack/catalog/tree/main/modules/k8s_manifest). +::: + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion workspace create`, `kusion project create`, `kusion stack create` command, which will create a workspace and project, and also generate a [kcl.mod](deploy-application#kclmod) file under the stack directory. + +## Managing Workspace Configuration + +In the first guide in this series, we introduced a step to [initialize a workspace](deploy-application#initializing-workspace-configuration) with an empty configuration. The same empty configuration will still work in this guide, no changes are required there. Alternatively, if you have updated your workspace config in the previous guides, no changes need to be made either. + +However, if you (or the platform team) would like to set some default paths for the raw K8s manifest YAML files to standardize the behavior of applications in the `dev` workspace, you can do so by updating the `dev.yaml` with the following config block: + +```yaml +modules: + k8s_manifest: + path: oci://ghcr.io/kusionstack/k8s_manifest + version: 0.1.0 + configs: + default: + # The default paths to apply for the raw K8s manifest YAML files. + paths: + - /path/to/k8s_manifest.yaml + - /dir/to/k8s_manifest/ +``` + +Please note that the `paths` decalred by the platform engineers in the workspace configs will be merged with the ones declared by the developers in the `AppConfiguration` in `main.k`. 
+ +The workspace configuration needs to be updated with the command: + +```bash +kusion workspace update dev -f dev.yaml +``` + +## Example + +To apply the specified raw K8s manifest YAML files with `k8s_manifest` module, please use the `v0.2.1` version of `kam`, whose `workload` is no longer a required field in the `AppConfiguration` model. An example is shown below: + +`kcl.mod`: +```py +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "v0.2.1" } +k8s_manifest = { oci = "oci://ghcr.io/kusionstack/k8s_manifest", tag = "0.1.0" } +``` + +`stack.yaml`: +```yaml +# Generate a specified namespace +name: dev +extensions: + - kind: kubernetesNamespace + kubernetesNamespace: + namespace: test +``` + +`main.k`: +```py +import kam.v1.app_configuration as ac +import k8s_manifest + +test: ac.AppConfiguration { + accessories: { + "k8s_manifests": k8s_manifest.K8sManifest { + paths: [ + # The `test.yaml` should be placed under the stack directory, + # as it is declared using a relative path. + "./test.yaml" + ] + } + } +} +``` + +`test.yaml`: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: test + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 +``` + +## Generate and Applying + +Execute the `kusion generate` command, the `Deployment` in the `test.yaml` will be generated into a Kusion `Resource` with a Kusion ID in the `Spec`. + +``` +➜ dev git:(main) ✗ kusion generate + ✔︎ Generating Spec in the Stack dev... +resources: + - id: v1:Namespace:test + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: test + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:test:nginx-deployment + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + app: nginx + name: nginx-deployment + namespace: test + spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx:1.14.2 + name: nginx + ports: + - containerPort: 80 + dependsOn: + - v1:Namespace:test +secretStore: null +context: {} +``` + +Execute the `kusion apply` command, you may get the output like the following: + +``` +➜ dev git:(main) ✗ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev +ID Action +v1:Namespace:test Create +apps/v1:Deployment:test:nginx-deployment Create + + +Do you want to apply these diffs?: + > yes + +Start applying diffs ... + ✔︎ Succeeded v1:Namespace:test + ✔︎ Succeeded apps/v1:Deployment:test:nginx-deployment +Apply complete! Resources: 2 created, 0 updated, 0 deleted. 
+ +[v1:Namespace:test] +Type Kind Name Detail +READY Namespace test Phase: Active +[apps/v1:Deployment:test:nginx-deployment] +Type Kind Name Detail +READY Deployment nginx-deployment Ready: 3/3, Up-to-date: 3, Available: 3 +READY ReplicaSet nginx-deployment-7fb96c846b Desired: 3, Current: 3, Ready: 3 +READY Pod nginx-deployment-7fb96c846b-d9pp4 Ready: 1/1, Status: Running, Restart: 0, Age: 2s +``` + +## Validation + +We can verify the `Deployment` and `Pod` we have just applied: + +```shell +➜ dev git:(main) ✗ kubectl get deployment -n test +NAME READY UP-TO-DATE AVAILABLE AGE +nginx-deployment 3/3 3 3 70s +➜ dev git:(main) ✗ kubectl get pod -n test +NAME READY STATUS RESTARTS AGE +nginx-deployment-7fb96c846b-d9pp4 1/1 Running 0 87s +nginx-deployment-7fb96c846b-j45nt 1/1 Running 0 87s +nginx-deployment-7fb96c846b-tnz5f 1/1 Running 0 87s +``` diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/_category_.json b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/_category_.json new file mode 100644 index 00000000..79d3c6c5 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/2-working-with-k8s/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kubernetes" +} diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/1-prometheus.md b/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/1-prometheus.md new file mode 100644 index 00000000..d67141de --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/1-prometheus.md @@ -0,0 +1,327 @@ +--- +id: prometheus +--- + +# Configure Monitoring Behavior With Prometheus + +This document provides the step-by-step instruction to set up monitoring for your application. + +As of today, Kusion supports the configuration of Prometheus scraping behaviors for the target application. In the future, we will add more cloud-provider-native solutions, such as AWS CloudWatch, Azure Monitor, etc. + +The user guide below is composed of the following components: + +- Namespace +- Deployment +- Service +- ServiceMonitor + +:::tip + +This guide requires you to have a basic understanding of Kubernetes and Prometheus. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/) +::: + +## Pre-requisite +Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory. + +## Setting up your own Prometheus + +There a quite a few ways to set up Prometheus in your cluster: +1. Installing a Prometheus operator +2. Installing a standalone Prometheus server +3. Installing a Prometheus agent and connect to a remote Prometheus server + +[The advice from the Prometheus team](https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-401092041) is to use the `ServiceMonitor` or `PodMonitor` CRs via the Prometheus operator to manage scrape configs going forward[2]. + +In either case, you only have to do this setup once per cluster. This doc will use a minikube cluster and Prometheus operator as an example. 
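+
+If you are not sure whether the Prometheus operator is already installed in your cluster, one quick way to check (assuming `kubectl` is pointed at the target cluster) is to look for its CRDs before installing anything:
+
+```
+# The Prometheus operator registers CRDs under the monitoring.coreos.com group,
+# e.g. prometheuses, servicemonitors and podmonitors.
+kubectl get crds | grep monitoring.coreos.com
+```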
+ +### Installing Prometheus operator[3]. +To get the example in this user guide working, all you need is a running Prometheus operator. You can have that installed by running: +``` +LATEST=$(curl -s https://api.github.com/repos/prometheus-operator/prometheus-operator/releases/latest | jq -cr .tag_name) +curl -sL https://github.com/prometheus-operator/prometheus-operator/releases/download/${LATEST}/bundle.yaml | kubectl create -f - +``` + +This will install all the necessary CRDs and the Prometheus operator itself in the default namespace. Wait a few minutes, you can confirm the operator is up by running: +``` +kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator -n default +``` + +### Make sure RBAC is properly set up +If you have RBAC enabled on the cluster, the following must be created for Prometheus to work properly: +``` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: default +``` + +### Configure Prometheus instance via the operator +Once all of the above is set up, you can then configure the Prometheus instance via the operator: +``` +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: prometheus +spec: + serviceAccountName: prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + resources: + requests: + memory: 400Mi +``` +This Prometheus instance above will be cluster-wide, picking up ALL the service monitors and pod monitors across ALL the namespaces. +You can adjust the requests and limits accordingly if you have a larger cluster. + +### Exposing the Prometheus portal (optional) +Once you have the managed Prometheus instance created via the Prometheus CR above, you should be able to see a service created called `prometheus-operated`: + +![prometheus-operated](/img/docs/user_docs/guides/prometheus/prometheus-operated.png) + +If you are also running on minikube, you can expose it onto your localhost via kubectl: +``` +kubectl port-forward svc/prometheus-operated 9099:9090 +``` + +You should then be able to see the Prometheus portal via `localhost:9099` in your browser: + +![prometheus-portal](/img/docs/user_docs/guides/prometheus/prometheus-portal.png) + +If you are running a non-local cluster, you can try to expose it via another way, through an ingress controller for example. + +## Setting up workspace configs + +Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones to eliminate from developer's mind to make their lives easier. 
+ +In the case of setting up Prometheus, there are a few things to set up on the workspace level: + +### Operator mode + +The `operatorMode` flag indicates to Kusion whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages. + +To see more about different ways to run Prometheus in the Kubernetes cluster, please refer to the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md#prometheus-installation). + +Most cloud vendors provide an out-of-the-box monitoring solutions for workloads running in a managed-Kubernetes cluster (EKS, AKS, etc), such as AWS CloudWatch, Azure Monitor, etc. These solutions mostly involve installing an agent (CloudWatch Agent, OMS Agent, etc) in the cluster and collecting the metrics to a centralized monitoring server. In those cases, you don't need to set `operatorMode` to `True`. It only needs to be set to `True` when you have an installation of the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) running inside the Kubernetes cluster. + +:::info + +For differences between [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) and the [community kube-prometheus-stack helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), the details are documented [here](https://github.com/prometheus-operator/prometheus-operator#prometheus-operator-vs-kube-prometheus-vs-community-helm-chart). +::: + +### Monitor types + +The `monitorType` flag indicates the kind of monitor Kusion will create. It only applies when `operatorMode` is set to `True`. As of version 0.10.0, Kusion provides options to scrape metrics from either the application pods or its corresponding Kubernetes services. This determines the different kinds of resources Kusion manages when Prometheus runs as an operator in the target cluster. + +A sample `workspace.yaml` with Prometheus settings: +```yaml +modules: + ... + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Service + scheme: http + interval: 30s + timeout: 15s +... +``` + +To instruct Prometheus to scrape from pod targets instead: +```yaml +modules: + ... + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s +... +``` + +If the `operatorMode` is omitted from the `workspace.yaml`, Kusion defaults `operatorMode` to false. + +### Overriding with projectSelector + +Workspace configurations contain a set of default setting group for all projects in the workspace, with means to override them by Projects using a `projectSelector` keyword. + +Projects with the name matching those in projectSelector will use the values defined in that override group instead of the default. If a key is not present in the override group, the default value will be used. + +Take a look at the sample `workspace.yaml`: +```yaml +modules: + ... + kusionstack/monitoring@0.1.0: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + projectSelector: + - foobar + high_frequency: + monitorType: Service + projectSelector: + - helloworld +... 
+```
+
+In the example above, a project with the name `foobar` will have the monitoring settings where `operatorMode` is set to `False`, a 2-minute scraping interval, a 15-second timeout (coming from the default) and the http scheme (coming from the default). The `helloworld` project only overrides the monitor type to `Service` and inherits everything else from the default group.
+
+You cannot have the same project appear in two projectSelectors.
+
+For a full reference of what can be configured at the workspace level, please see the [workspace reference](../../reference/modules/workspace-configs/monitoring/prometheus).
+
+## Updating the workspace config
+
+Assuming you now have a `workspace.yaml` that looks like the following:
+```yaml
+modules:
+  kusionstack/monitoring@0.1.0:
+    default:
+      operatorMode: True
+      monitorType: Service
+      scheme: http
+      interval: 30s
+      timeout: 15s
+...
+```
+
+Update the workspace configuration by running the following command:
+```
+kusion workspace update dev -f workspace.yaml
+```
+Verify the workspace config is properly updated by running the command:
+```
+kusion workspace show dev
+```
+
+## Using kusion to deploy your application with monitoring requirements
+
+At this point we are set up for good! Any new applications you deploy via kusion will now automatically have the monitoring-related resources created, should you declare you want it via the `monitoring` field in the `AppConfiguration` model.
+
+The monitoring in an AppConfiguration is declared in the `monitoring` field. See the example below for a full, deployable AppConfiguration.
+
+Please note we are using a new image `quay.io/brancz/prometheus-example-app` since the app itself needs to expose metrics for Prometheus to scrape:
+
+`helloworld/dev/kcl.mod`:
+```
+[package]
+name = "helloworld"
+
+[dependencies]
+monitoring = { oci = "oci://ghcr.io/kusionstack/monitoring", tag = "0.2.0" }
+kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" }
+service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" }
+network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" }
+
+[profile]
+entries = ["main.k"]
+```
+
+`helloworld/dev/main.k`:
+```
+import kam.v1.app_configuration as ac
+import service
+import service.container as c
+import monitoring as m
+import network as n
+
+helloworld: ac.AppConfiguration {
+    workload: service.Service {
+        containers: {
+            "monitoring-sample-app": c.Container {
+                image: "quay.io/brancz/prometheus-example-app:v0.3.0"
+            }
+        }
+    }
+    # Add the monitoring configuration backed by Prometheus
+    accessories: {
+        "monitoring": m.Prometheus {
+            path: "/metrics"
+        }
+        "network": n.Network {
+            ports: [
+                n.Port {
+                    port: 8080
+                }
+            ]
+        }
+    }
+}
+```
+
+The KCL file above represents an application with a service-type workload, exposing port 8080, and asks Prometheus to scrape the `/metrics` endpoint at the interval configured in the workspace (30 seconds in the `workspace.yaml` above).
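+
+Optionally, before applying, you can preview the resources that will be created from the stack directory (the same `kusion preview` workflow used in the earlier guides); the resource list should match what the screenshots below show:
+
+```
+cd helloworld/dev && kusion preview
+```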
+
+Running `kusion apply` would show that kusion will create a `Namespace`, a `Deployment`, a `Service` and a `ServiceMonitor`:
+![kusion-apply-with-monitor](/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png)
+
+Continue applying all resources:
+![kusion-apply-success](/img/docs/user_docs/guides/prometheus/kusion-apply-success.png)
+
+If we want to, we can verify the service monitor has been created successfully:
+![service-monitor](/img/docs/user_docs/guides/prometheus/service-monitor.png)
+
+In a few seconds, you should be able to see in the Prometheus portal that the service we just deployed has now been discovered and is monitored by Prometheus:
+![prometheus-targets](/img/docs/user_docs/guides/prometheus/prometheus-targets.png)
+
+You can run a few simple queries against the data that Prometheus scraped from your application:
+![prometheus-simple-query](/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png)
+
+For more info about PromQL, you can find it [here](https://prometheus.io/docs/prometheus/latest/querying/basics/)[4].
+
+## References
+1. Prometheus: https://prometheus.io/docs/introduction/overview/
+2. Prometheus team advice: https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-446691500
+3. Prometheus operator getting started doc: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md
+4. PromQL basics: https://prometheus.io/docs/prometheus/latest/querying/basics/
\ No newline at end of file
diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/2-resource-graph.md b/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/2-resource-graph.md
new file mode 100644
index 00000000..1d169d6c
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/2-resource-graph.md
@@ -0,0 +1,88 @@
+---
+id: resource-graph
+---
+
+# Resource Graph
+
+Kusion provides a powerful feature to visualize the relationships and dependencies between kusion `resources` using a resource graph. This feature offers several key benefits:
+
+- Comprehensive Visualization: The resource graph offers a clear, visual representation of your entire infrastructure, allowing you to see all resources and their interconnections at a glance. It includes detailed information about each cloud resource, such as its type, name, and unique identifiers, making it easier to locate and manage resources in your cloud environment.
+
+- Dependency Tracking: It helps you understand how resources are linked, making it easier to identify potential impacts when making changes to your infrastructure.
+
+- Troubleshooting Aid: When issues arise during the `apply` operation, the resource graph becomes an invaluable tool for pinpointing the source of problems. It provides a clear visual representation of resource relationships and their current status. This comprehensive view significantly reduces debugging time and enhances your ability to maintain a stable and efficient infrastructure.
+
+- Visual Documentation: The resource graph serves as up-to-date visual documentation of your infrastructure. It automatically updates as changes occur, giving you a current picture of your resource
+ +This feature empowers you to gain a comprehensive and intuitive understanding of your infrastructure's architecture, enabling more efficient troubleshooting and decision-making. + +## Prerequisites + +Please refer to the [Deliver the WordPress Application with Cloud RDS](../1-cloud-resources/1-database.md) in the guide for deploying an application. + +This guide will assume that you have already deployed an application following the guide. + +## Display Resource Graph + +To display a resource graph, you need to run the following command with project name specified: + +```bash +kusion resource graph --project wordpress-rds-cloud +``` + +The output will be a resource graph in the terminal: + +```shell +Displaying resource graph in the project wordpress-rds-cloud... + +Workspace: demo + +Workload Resources: +ID Kind Name CloudResourceID Status +apps/v1:Deployment:wordpress-rds-cloud:wordpress-rds-cl Kubernetes:apps/v1:Deployment wordpress-rds-cloud/wordpress- Apply succeeded | Reconciled +oud-dev-wordpress rds-cloud-dev-wordpress + +Dependency Resources: +ID Kind Name CloudResourceID Status +v1:Secret:wordpress-rds-cloud:wordpress-mysql-mysql Kubernetes:v1:Secret wordpress-rds-cloud/wordpress- Apply succeeded | Reconciled + mysql-mysql +v1:Service:wordpress-rds-cloud:wordpress-rds-cloud-dev- Kubernetes:v1:Service wordpress-rds-cloud/wordpress- Apply succeeded | Reconciled +wordpress-private rds-cloud-dev-wordpress-privat + e +v1:Namespace:wordpress-rds-cloud Kubernetes:v1:Namespace wordpress-rds-cloud Apply succeeded | Reconciled + +Other Resources: +ID Kind Name CloudResourceID Status +aliyun:alicloud:alicloud_db_connection:wordpress-mysql alicloud:alicloud_db_connectio wordpress-mysql rm-2zer0f93xy490fdzq:rm-2zer0f Apply succeeded | Reconciled + n 93xy490fdzqtf +aliyun:alicloud:alicloud_db_instance:wordpress-mysql alicloud:alicloud_db_instance wordpress-mysql rm-2zer0f93xy490fdzq Apply succeeded | Reconciled +aliyun:alicloud:alicloud_rds_account:wordpress-mysql alicloud:alicloud_rds_account wordpress-mysql rm-2zer0f93xy490fdzq:root Apply succeeded | Reconciled +hashicorp:random:random_password:wordpress-mysql-mysql custom:random_password Apply succeeded +``` + +The resource graph output provides a comprehensive overview of the resources in your project. Let's break down each field: + +- ID: This is a unique identifier for each resource. + +- Kind: This field indicates the type of resource. + +- Name: This is the name of the resource within its namespace or scope. + +- CloudResourceID: For cloud resources, this field shows the unique identifier assigned by the cloud provider. For Kubernetes resources, this field is often empty. + +- Status: This field shows the current state of the resource. Common statuses include: + - "Apply succeeded | Reconciled": The resource has been successfully created and is in sync with the desired state. + - "Apply succeeded | Reconcile failed": The resource has been successfully created, but the resource is not in sync with the desired state. + - "Apply succeeded": The `apply` operation has completed, but the resource might not in sync with the desired state. + - "Apply failed": The `apply` operation has failed. + +The graph is divided into three sections: + +1. Workload Resources: These are the main application components, such as Kubernetes Deployments. + +2. Dependency Resources: These are resources that the workload depends on, such as Kubernetes Secrets, Services, and Namespaces. + +3. 
Other Resources: This section includes additional resources, often cloud provider-specific, such as database instances and connections.
+
+This graph gives you a clear view of all the resources in your project, their types, names, cloud identifiers (if applicable), and current status. It's particularly useful for understanding the structure of your application and its dependencies, as well as for troubleshooting and ensuring all resources are in the expected state.
diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/_category_.json b/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/_category_.json
new file mode 100644
index 00000000..b061ae3e
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/5-user-guides/3-observability/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Automated Observability"
+}
diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/4-secrets-management/1-using-cloud-secrets.md b/docs_versioned_docs/version-v0.13/5-user-guides/4-secrets-management/1-using-cloud-secrets.md
new file mode 100644
index 00000000..2eb46f74
--- /dev/null
+++ b/docs_versioned_docs/version-v0.13/5-user-guides/4-secrets-management/1-using-cloud-secrets.md
@@ -0,0 +1,101 @@
+---
+id: using-cloud-secrets
+---
+
+# Using Cloud Secrets Manager
+
+Applications usually store sensitive data in secrets by using centralized secrets management solutions. For example, you authenticate databases, services, and external systems with passwords, API keys, tokens, and other credentials stored in a secret store, e.g. HashiCorp Vault, AWS Secrets Manager, Azure Key Vault, etc.
+
+Kusion provides out-of-the-box support for referencing existing external secrets management solutions. This tutorial introduces how to pull a secret from AWS Secrets Manager and make it available to applications.
+
+## Prerequisites
+
+Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application.
+
+The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory.
+
+Additionally, you need to configure the obtained AccessKey and SecretKey as environment variables:
+
+```bash
+export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey
+export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey
+```
+
+![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png)
+
+## Setting up workspace
+
+Since v0.10.0, we have introduced the concept of [workspaces](../../3-concepts/4-workspace.md), whose configurations represent the part of the application behaviors that platform teams are interested in standardizing, or the ones they want to take off developers' minds to make their lives easier.
+
+To set up a cloud secrets manager, platform teams need to specify which secrets management solution to use, along with the necessary access information, at the workspace level.
+
+A sample `workspace.yaml` with AWS Secrets Manager settings:
+
+```
+modules:
+  ...
+secretStore:
+  provider:
+    aws:
+      region: us-east-1
+      profile: The optional profile to be used to interact with AWS Secrets Manager.
+...
+```
+
+:::note
+The `provider` of the `secretStore` now supports `aws`, `alicloud` and `viettelcloud`.
+:::
+
+## Update AppConfiguration
+
+At this point, the setup is complete!
Now you can declare external type of secrets via the `secrets` field in the `AppConfiguration` model to consume sensitive data stored in AWS Secrets Manager. + +See the example below for a full, deployable AppConfiguration. + +``` +import kam.v1.app_configuration as ac +import service +import service.container as c +import service.secret as sec + +gitsync: ac.AppConfiguration { + workload: service.Service { + containers: { + "syncer": c.Container { + image: "dyrnq/git-sync" + # Run the following command as defined + command: [ + "--repo=https://github.com/KusionStack/kusion" + "--ref=HEAD" + "--root=/mnt/git" + ] + # Consume secrets in environment variables + env: { + "GIT_SYNC_USERNAME": "secret://git-auth/username" + "GIT_SYNC_PASSWORD": "secret://git-auth/password" + } + } + } + # Secrets used to retrieve secret data from AWS Secrets Manager + secrets: { + "git-auth": sec.Secret { + type: "external" + data: { + "username": "ref://git-auth-info/username" + "password": "ref://git-auth-info/password" + } + } + } + } +} +``` + +## Apply and Verify + +Run `kusion apply` command to deploy above application, then use the below command to verify if the secret got deployed: + +``` +kubectl get secret -n secretdemo +``` + +You will find `git-auth` of type Opaque automatically created and contains sensitive information pulled from AWS Secrets Manager. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/4-secrets-management/_category_.json b/docs_versioned_docs/version-v0.13/5-user-guides/4-secrets-management/_category_.json new file mode 100644 index 00000000..8990c11b --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/4-secrets-management/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Secrets Management" +} diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/5-production-practice-case/1-production-practice-case.md b/docs_versioned_docs/version-v0.13/5-user-guides/5-production-practice-case/1-production-practice-case.md new file mode 100644 index 00000000..52d5f07e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/5-production-practice-case/1-production-practice-case.md @@ -0,0 +1,190 @@ +--- +id: collaborate-with-github-actions +--- + +# Achieving Team Collaboration in Production Practice with GitHub Actions + +In this article, we will introduce how to use Kusion CLI in combination with GitHub Actions to achieve team collaboration in production practice. + +Adopting the concept of separation of concerns, we divide the staff involved in application delivery and operation into two groups: **Platform Engineers (PEs)** and **Developers (Devs)**. As the builders of the Internal Developer Platform (IDP), platform engineers are primarily responsible for creating the [storage backend](../../3-concepts/7-backend.md) for the Kusion CLI in team collaborative scenarios (e.g. AWS S3 or Alicloud OSS), developing custom reusable [Kusion modules](../../3-concepts/3-module/1-overview.md), and creating and maintaining standardized platform configurations in [workspace](../../3-concepts/4-workspace.md). While application developers can focus on writing the application business logic and the configuration codes, self-serving the application delivery and operation by triggering the automated CI/CD pipelines. 
[GitHub Actions](https://github.com/features/actions) is such a CI/CD platform: by customizing a [GitHub Actions workflow](https://docs.github.com/en/actions/using-workflows), pipelines for building, testing, and deploying can be executed automatically.
+
+In the following sections, we will demonstrate the specific workflow from the perspectives of both PEs and Devs, using the sample workflows from our [konfig](https://github.com/KusionStack/konfig) and [catalog](https://github.com/KusionStack/catalog) repositories.
+
+## Perspective of PE
+
+### Setup Kusion Storage Backend
+
+In order to enable multiple people to collaboratively edit and modify application configuration code within a team, PEs need to create a centralized remote storage backend for the Kusion CLI, such as [AWS S3](https://aws.amazon.com/pm/serv-s3/) or [Alicloud OSS](https://www.alibabacloud.com/en/product/object-storage-service). Below is an example OSS bucket; we can see that it is mainly used to store application **releases** and **workspace** configurations.
+
+![alicloud oss bucket for storage backend](/img/docs/user_docs/guides/github-actions/alicloud_oss_storage_backend.png)
+
+Suppose PEs have set up the Alicloud OSS storage backend and obtained an AK/SK with permission to read and write the bucket; they can then use the following commands to set up the remote storage backend.
+
+```shell
+# please replace the env with actual AK/SK
+export OSS_ACCESS_KEY_ID=LTAxxxxxxxxxxxxxx
+export OSS_ACCESS_KEY_SECRET=uUPxxxxxxxxxx
+
+# set up backend
+kusion config set backends.oss_test '{"type":"oss","configs":{"bucket":"kusion-test","endpoint":"oss-cn-shanghai.aliyuncs.com"}}'
+kusion config set backends.current oss_test
+```
+
+### Develop Customized Kusion Modules
+
+In the production practice of an enterprise, a common scenario is that PEs need to abstract and encapsulate the on-premises computing, storage, and networking resources to reduce the cognitive burden on developers. To achieve this, they can develop customized Kusion modules, a kind of reusable building block. Below is an example [GitHub Actions workflow](https://github.com/KusionStack/catalog/actions/runs/9398478367/job/25883893076) that pushes the module artifacts officially provided by KusionStack, built for multiple OS/arch combinations, to [GitHub Packages](https://github.com/features/packages).
+
+![upload kusion modules through github actions](/img/docs/user_docs/guides/github-actions/upload_modules.png)
+
+### Create and Update Workspace
+
+Moreover, PEs also need to create and update the workspace configurations, where they can declare the Kusion modules available in the workspace and add standardized default or application-specific configurations that apply across the entire scope of the workspace.
+
+Once the remote storage backend is set up, PEs can use the following commands to create and update a workspace.
+
+```shell
+# create workspace with the name of 'dev'
+kusion workspace create dev
+
+# update workspace with 'dev.yaml'
+kusion workspace update dev -f dev.yaml
+
+# switch to the 'dev' workspace
+kusion workspace switch dev
+```
+
+```yaml
+# dev.yaml declares 'mysql' and 'network' modules in the workspace
+modules:
+  mysql:
+    path: oci://ghcr.io/kusionstack/mysql
+    version: 0.2.0
+  network:
+    path: oci://ghcr.io/kusionstack/network
+    version: 0.2.0
+```
+
+So far, PEs have largely completed the fundamental work for setting up the IDP.
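+
+Before handing the environment over to developers, PEs may want to sanity-check that the backend and workspace configurations are in place. The following is a small sketch using the commands introduced above; the backend and workspace names are the ones from this example.
+
+```shell
+# confirm the current backend points at the OSS bucket
+kusion config list
+
+# confirm the 'dev' workspace carries the expected module declarations
+kusion workspace show dev
+```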
+ +## Perspective of Dev + +### Setup Kusion Storage Backend + +In order to get the available modules of the workspace and validate the generated [spec](../../3-concepts/6-spec.md), developers need to communicate with PEs to obtain the AK/SK (usually with **Read-Only** permission), bucket name, and the endpoint to access the remote storage backend. And similar to the PEs, developers can set up the backend configs with the following commands. + +```shell +# please replace the env with actual AK/SK +export OSS_ACCESS_KEY_ID=LTAxxxxxxxxxxxxxx +export OSS_ACCESS_KEY_SECRET=uUPxxxxxxxxxx + +# set up backend +kusion config set backends.oss_test '{"type":"oss","configs":{"bucket":"kusion-test","endpoint":"oss-cn-shanghai.aliyuncs.com"}}' +kusion config set backends.current oss_test +``` + +### Create and Update Project and Stack + +Next, developers can create and update the [Project](../../3-concepts/1-project/1-overview.md) and [Stack](../../3-concepts/2-stack/1-overview.md) configurations with `kusion project` and `kusion stack` command. + +```shell +# create a new project named quickstart +mkdir quickstart && cd quickstart +kusion project create + +# create a stack named dev +kusion stack create dev +``` + +Below shows the initiated project and stack contents. + +```yaml +# quickstart/project.yaml +name: quickstart +``` + +```yaml +# quickstart/dev/stack.yaml +# The metadata information of the stack. +name: dev +``` + +```python +# kcl.mod +# Please add the modules you need in 'dependencies'. +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +``` + +```python +# main.k +# The configuration codes in perspective of developers. +import kam.v1.app_configuration as ac +import service +import service.container as c + +# Please replace the ${APPLICATION_NAME} with the name of your application, and complete the +# 'AppConfiguration' instance with your own workload and accessories. +${APPLICATION_NAME}: ac.AppConfiguration { + workload: service.Service { + containers: { + + } + } + accessories: { + + } +} +``` + +Developers can use `kusion mod list` to get the available modules in current workspace and use `kusion mod add` to add a specified module to current stack. + +```shell +# list the available modules in the current workspace +➜ kusion mod list +Name Version URL +mysql 0.2.0 oci://ghcr.io/kusionstack/mysql +network 0.2.0 oci://ghcr.io/kusionstack/network +``` + +```shell +# add the specified modules to the current stack +kusion mod add mysql && kusion mod add network +``` + +The corresponding module artifacts will be downloaded and the declaration of the modules will be added to `kcl.mod`, which can be compared to `go mod tidy` and `go.mod`. + +```python +# kcl.mod after executing 'kusion mod add' +[package] + +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = { oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +mysql = { oci = "oci://ghcr.io/kusionstack/mysql", tag = "0.2.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +``` + +After this, developers can edit the application configuration codes according to the actual needs. 
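+
+Before pushing the changes to trigger the pipelines described in the next section, developers may also want to do a quick local dry run from the stack directory. The snippet below is a minimal sketch, assuming the OSS backend and the `dev` workspace configured above; the output path is just an example.
+
+```shell
+# run from the stack directory
+cd quickstart/dev
+
+# render the Spec locally to validate the configuration
+kusion generate -o /tmp/quickstart-spec.yaml
+
+# preview the resource changes before opening a pull request
+kusion preview
+```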
+ +### Trigger Preview and Apply Pipeline + +[KusionStack/konfig](https://github.com/KusionStack/konfig) is the official example repository, and provides a set of GitHub Actions workflows [preview.yml](https://github.com/KusionStack/konfig/blob/main/.github/workflows/preview.yml) and [apply.yml](https://github.com/KusionStack/konfig/blob/main/.github/workflows/apply.yml). The `preview.yml` is triggered by a pull request to the main branch, while `apply.yml` is triggered by a push to the main branch. + +![preview workflow](/img/docs/user_docs/guides/github-actions/github_actions_preview.png) + +![apply workflow](/img/docs/user_docs/guides/github-actions/github_actions_apply.png) + +The previewing workflow will first get the changed projects and stacks. + +![get changed projects and stacks](/img/docs/user_docs/guides/github-actions/github_actions_get_changed_projects_stacks.png) + +Then the previewing workflow will execute the `kusion preview` command to all of the changed stacks, and open an issue for manual approval to merge the changes after the approvers check the preview result artifact. + +![preview workflow details](/img/docs/user_docs/guides/github-actions/github_actions_preview_details.png) + +![mannual approval](/img/docs/user_docs/guides/github-actions/github_actions_mannual_approval.png) + +Once the code review is completed and the pull request is merged into the main branch, it will trigger the apply workflow, which will deploy the changes to the affected Projects and Stacks, and upload the respective results to the GitHub Actions Artifacts. + +![apply workflow details](/img/docs/user_docs/guides/github-actions/github_actions_apply_details.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/5-production-practice-case/_category_.json b/docs_versioned_docs/version-v0.13/5-user-guides/5-production-practice-case/_category_.json new file mode 100644 index 00000000..2b76a644 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/5-production-practice-case/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Production Practice Case" +} diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/6-llm-ops/1-inference.md b/docs_versioned_docs/version-v0.13/5-user-guides/6-llm-ops/1-inference.md new file mode 100644 index 00000000..a2851189 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/6-llm-ops/1-inference.md @@ -0,0 +1,409 @@ +--- +id: inference +--- + +# Provide LLM Service with Inference Module for AI Application + +In the wave of Artificial Intelligence (AI), Large Language Models (LLMs) are gradually becoming a key factor in driving innovation and productivity. As a result, researchers and developers are looking for a more efficient way to deploy and manage complex LLM models and AI applications. + +To simplify the process from model construction, deployment and interaction with applications, the KusionStack community has provided an `inference` module. We will explore in detail how to deploy an AI application using LLM service provided by this module in this article. + +:::info +The module definition and implementation, as well as the example application we are about to show can be found [here](https://github.com/KusionStack/catalog/tree/main/modules/inference). 
+::: + +## Prerequisites + +Before we begin, we need to perform the following steps to set up the environment required by Kusion: + +- Install Kusion +- Running Kubernetes cluster + +For more details, please refer to the [prerequisites](https://www.kusionstack.io/docs/user-guides/working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application with Kusion. + +## Initializing and Managing Workspace Configuration + +For information on how to initialize and switch a workspace with `kusion workspace create` and `kusion workspace switch`, please refer to [this document](https://www.kusionstack.io/docs/user-guides/working-with-k8s/deploy-application#initializing-workspace-configuration). + +For the current version of the `inference` module, an empty configuration for workspace initialization is enough, and users may need to configure the `network` module as an accessory to provide the network service for the AI application, whose workload is described with `service` module. Users can also add other modules' platform configurations in the workspace according to their need. + +An example is shown below: + +```yaml +modules: + service: + path: oci://ghcr.io/kusionstack/service + version: 0.2.0 + configs: + default: {} + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: {} + inference: + path: oci://ghcr.io/kusionstack/inference + version: 0.1.0-beta.4 + configs: + default: {} +``` + +## Example + +After creating and switching to the workspace shown above, we can initialize the example `Project` and `Stack` with `kusion project create` and `kusion stack create`. Please refer to [this document](https://www.kusionstack.io/docs/user-guides/working-with-k8s/deploy-application#initializing-application-configuration) for more details. + +The directory structure, and configuration file contents of the example project is shown below: + +```shell +example/ +. +├── default +│ ├── kcl.mod +│ ├── main.k +│ └── stack.yaml +└── project.yaml +``` + +`project.yaml`: + +```yaml +name: example +``` + +`stack.yaml`: + +```yaml +name: default +``` + +`kcl.mod`: + +```yaml +[dependencies] +kam = { git = "https://github.com/KusionStack/kam.git", tag = "0.2.0" } +service = {oci = "oci://ghcr.io/kusionstack/service", tag = "0.1.0" } +network = { oci = "oci://ghcr.io/kusionstack/network", tag = "0.2.0" } +inference = { oci = "oci://ghcr.io/kusionstack/inference", tag = "0.1.0-beta.4" } +``` + +`main.k`: + +```python +import kam.v1.app_configuration as ac +import service +import service.container as c +import network as n +import inference.v1.inference + +inference: ac.AppConfiguration { + # Declare the workload configurations. + workload: service.Service { + containers: { + myct: c.Container {image: "kangy126/app"} + } + replicas: 1 + } + # Declare the inference module configurations. + accessories: { + "inference": inference.Inference { + model: "llama3" + framework: "Ollama" + } + "network": n.Network {ports: [n.Port { + port: 80 + targetPort: 5000 + }]} + } +} +``` + +In the above example, we configure the `model` and `framework` item of the `inference` module, which are two required configuration items for this module. The inference service of different models with different inference frameworks could be quickly built up by changing these two configuration items. 
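+
+If the `dev` workspace has not been created yet, a minimal command sequence might look like the one below. It only assumes that the platform configuration shown above is saved locally as `dev.yaml` (an example file name).
+
+```shell
+# create the 'dev' workspace, load the platform configuration above, and switch to it
+kusion workspace create dev
+kusion workspace update dev -f dev.yaml
+kusion workspace switch dev
+```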
+ +As for how the AI application use the LLM service provided by the `inference` module, an environment variable named `INFERENCE_URL` will be injected by the module and the application can call the LLM service with the address. + +Which model used in the application is transparent, and you only need to provide the `prompt` parameter to the request address. Of course, you can directly modify the model and other configuration items in the `main.k` file and update the deployment resources by `kusion apply`. + +There are also some optional configuration items in the `inference` module for adjusting the LLM service, whose details can be found [here](../../6-reference/2-modules/1-developer-schemas/inference/inference.md). + +## Deployment + +Now we can generate and deploy the `Spec` containing all the relevant resources the AI application needs with Kusion. + +First, we should navigate to the folder `example/default` and execute the `kusion generate` command, and a `Spec` will be generated. + +``` +➜ default git:(main) ✗ kusion generate + ✔︎ Generating Spec in the Stack default... +resources: + - id: v1:Namespace:example + type: Kubernetes + attributes: + apiVersion: v1 + kind: Namespace + metadata: + creationTimestamp: null + name: example + spec: {} + status: {} + extensions: + GVK: /v1, Kind=Namespace + - id: apps/v1:Deployment:example:example-default-inference + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + name: example-default-inference + namespace: example + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + spec: + containers: + - env: + - name: INFERENCE_URL + value: ollama-infer-service + image: kangy126/app + name: myct + resources: {} + status: {} + dependsOn: + - v1:Namespace:example + - v1:Service:example:ollama-infer-service + - v1:Service:example:example-default-inference-private + extensions: + GVK: apps/v1, Kind=Deployment + kusion.io/is-workload: true + - id: apps/v1:Deployment:example:ollama-infer-deployment + type: Kubernetes + attributes: + apiVersion: apps/v1 + kind: Deployment + metadata: + creationTimestamp: null + name: ollama-infer-deployment + namespace: example + spec: + selector: + matchLabels: + accessory: ollama + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + accessory: ollama + spec: + containers: + - command: + - /bin/sh + - -c + - |- + echo 'FROM llama3 + PARAMETER top_k 40 + PARAMETER top_p 0.900000 + PARAMETER temperature 0.800000 + PARAMETER num_predict 128 + PARAMETER num_ctx 2048 + ' > Modelfile && ollama serve & OLLAMA_SERVE_PID=$! 
&& sleep 5 && ollama create llama3 -f Modelfile && wait $OLLAMA_SERVE_PID + image: ollama/ollama + name: ollama-infer-container + ports: + - containerPort: 11434 + name: ollama-port + resources: {} + volumeMounts: + - mountPath: /root/.ollama + name: ollama-infer-storage + volumes: + - emptyDir: {} + name: ollama-infer-storage + status: {} + dependsOn: + - v1:Namespace:example + - v1:Service:example:ollama-infer-service + - v1:Service:example:example-default-inference-private + extensions: + GVK: apps/v1, Kind=Deployment + - id: v1:Service:example:ollama-infer-service + type: Kubernetes + attributes: + apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + accessory: ollama + name: ollama-infer-service + namespace: example + spec: + ports: + - port: 80 + targetPort: 11434 + selector: + accessory: ollama + type: ClusterIP + status: + loadBalancer: {} + dependsOn: + - v1:Namespace:example + extensions: + GVK: /v1, Kind=Service + - id: v1:Service:example:example-default-inference-private + type: Kubernetes + attributes: + apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + name: example-default-inference-private + namespace: example + spec: + ports: + - name: example-default-inference-private-80-tcp + port: 80 + protocol: TCP + targetPort: 5000 + selector: + app.kubernetes.io/name: inference + app.kubernetes.io/part-of: example + type: ClusterIP + status: + loadBalancer: {} + dependsOn: + - v1:Namespace:example + extensions: + GVK: /v1, Kind=Service +secretStore: null +context: {} +``` + +Next, we can execute the `kusion preview` command and review the resource three-way diffs for a more secure deployment. + +``` +➜ default git:(main) ✗ kusion preview + ✔︎ Generating Spec in the Stack default... +Stack: default +ID Action +v1:Namespace:example Create +v1:Service:example:ollama-infer-service Create +v1:Service:example:example-default-inference-private Create +apps/v1:Deployment:example:example-default-inference Create +apps/v1:Deployment:example:ollama-infer-deployment Create + + +Which diff detail do you want to see?: +> all + v1:Namespace:example Create + v1:Service:example:ollama-infer-service Create + v1:Service:example:example-default-inference-private Create + apps/v1:Deployment:example:example-default-inference Create +``` + +Finally, execute the `kusion apply` command to deploy the related Kubernetes resources. + +``` +➜ default git:(main) ✗ kusion apply + ✔︎ Generating Spec in the Stack default... +Stack: default +ID Action +v1:Namespace:example Create +v1:Service:example:ollama-infer-service Create +v1:Service:example:example-default-inference-private Create +apps/v1:Deployment:example:ollama-infer-deployment Create +apps/v1:Deployment:example:example-default-inference Create + + +Do you want to apply these diffs?: + > yes + +Start applying diffs ... + ✔︎ Succeeded v1:Namespace:example + ✔︎ Succeeded v1:Service:example:ollama-infer-service + ✔︎ Succeeded v1:Service:example:example-default-inference-private + ✔︎ Succeeded apps/v1:Deployment:example:ollama-infer-deployment + ✔︎ Succeeded apps/v1:Deployment:example:example-default-inference +Apply complete! Resources: 5 created, 0 updated, 0 deleted. + +``` + +## Testing + +Execute the `kubectl get all -n example` command, and the deployed Kubernetes resources will be shown. 
+ +``` +➜ ~ kubectl get all -n example +NAME READY STATUS RESTARTS AGE +pod/example-dev-inference-5cf6c74574-7w92f 1/1 Running 0 2d6h +pod/mynginx 1/1 Running 0 2d6h +pod/ollama-infer-deployment-7c56845496-s5snb 1/1 Running 0 2d6h + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/example-dev-inference-public ClusterIP 192.168.116.121 80:32693/TCP 2d6h +service/ollama-infer-service ClusterIP 192.168.28.208 80/TCP 2d6h + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/example-dev-inference 1/1 1 1 2d6h +deployment.apps/ollama-infer-deployment 1/1 1 1 2d6h + +NAME DESIRED CURRENT READY AGE +replicaset.apps/example-dev-inference-5cf6c74574 1 1 1 2d6h +replicaset.apps/ollama-infer-deployment-7c56845496 1 1 1 2d6h +``` + +The AI application in the example provides a simple service that returns the LLM responses when sending a GET request with the `prompt` parameter. + +We can test the application service locally by `port-forward`, allowing us to directly send requests to the application via our browser. + +```sh +kubectl port-forward service/example-dev-inference-public 8080:80 -n example +``` + +The test results are shown in the figure below. + +![](/img/docs/user_docs/guides/llm-ops/inference-test-1.png) + +By modifying the `model` parameter in the `main.k` file, you can switch to a different model without having to change the application itself. + +For example, we change the value of `model` from `llama3` to `qwen`. Then we execute the `kusion apply` command to update the K8S resources. + +```sh +❯ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev +ID Action +v1:Namespace:example UnChanged +v1:Service:example:ollama-infer-service UnChanged +v1:Service:example:proxy-infer-service UnChanged +v1:Service:example:example-dev-inference-public UnChanged +apps/v1:Deployment:example:example-dev-inference UnChanged +apps/v1:Deployment:example:proxy-infer-deployment Update +apps/v1:Deployment:example:ollama-infer-deployment Update + + +Do you want to apply these diffs?: + yes +> details + no +``` + +We repeat to send the request to the application via the browser, and the new results are as follows. 
+ +![](/img/docs/user_docs/guides/llm-ops/inference-test-2.png) diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/6-llm-ops/_category_.json b/docs_versioned_docs/version-v0.13/5-user-guides/6-llm-ops/_category_.json new file mode 100644 index 00000000..d0ed9947 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/6-llm-ops/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "LLM Ops" +} diff --git a/docs_versioned_docs/version-v0.13/5-user-guides/_category_.json b/docs_versioned_docs/version-v0.13/5-user-guides/_category_.json new file mode 100644 index 00000000..abf4c874 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/5-user-guides/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guides" +} diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/_category_.json b/docs_versioned_docs/version-v0.13/6-reference/1-commands/_category_.json new file mode 100644 index 00000000..d783ca2e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Commands" +} diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/index.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/index.md new file mode 100644 index 00000000..dd782a3e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/index.md @@ -0,0 +1,40 @@ +# Kusion Commands + +Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +### Synopsis + +As a Platform Orchestrator, Kusion delivers user intentions to Kubernetes, Clouds and On-Premise resources. Also enables asynchronous cooperation between the development and the platform team and drives the separation of concerns. + +``` +kusion [flags] +``` + +### Options + +``` + -h, --help help for kusion + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion apply](kusion-apply.md) - Apply the operational intent of various resources to multiple runtimes +* [kusion config](kusion-config.md) - Interact with the Kusion config +* [kusion destroy](kusion-destroy.md) - Destroy resources within the stack. 
+* [kusion generate](kusion-generate.md) - Generate and print the resulting Spec resources of target Stack +* [kusion init](kusion-init.md) - Initialize the scaffolding for a demo project +* [kusion mod](kusion-mod.md) - Manage Kusion modules +* [kusion options](kusion-options.md) - Print the list of flags inherited by all commands +* [kusion preview](kusion-preview.md) - Preview a series of resource changes within the stack +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository +* [kusion release](kusion-release.md) - Manage Kusion release files +* [kusion resource](kusion-resource.md) - Observe Kusion resource information +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory +* [kusion version](kusion-version.md) - Print the Kusion version information for the current context +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-apply.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-apply.md new file mode 100644 index 00000000..032fe3dc --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-apply.md @@ -0,0 +1,77 @@ +# kusion apply + +Apply the operational intent of various resources to multiple runtimes + +### Synopsis + +Apply a series of resource changes within the stack. + + Create, update or delete resources according to the operational intent within a stack. By default, Kusion will generate an execution preview and prompt for your approval before performing any actions. You can review the preview details and make a decision to proceed with the actions or abort them. + +``` +kusion apply [flags] +``` + +### Examples + +``` + # Apply with specified work directory + kusion apply -w /path/to/workdir + + # Apply with specified arguments + kusion apply -D name=test -D age=18 + + # Apply with specifying spec file + kusion apply --spec-file spec.yaml + + # Skip interactive approval of preview details before applying + kusion apply --yes + + # Apply without output style and color + kusion apply --no-style=true + + # Apply without watching the resource changes and waiting for reconciliation + kusion apply --watch=false + + # Apply with the specified timeout duration for kusion apply command, measured in second(s) + kusion apply --timeout=120 + + # Apply with localhost port forwarding + kusion apply --port-forward=8080 +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. 
+ -d, --detail Automatically show preview details with interactive options (default true) + --dry-run Preview the execution effect (always successful) without actually applying the changes + -h, --help help for apply + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string Specify the output format + --port-forward int Forward the specified port from local to service + --spec-file string Specify the spec file path as input, and the spec file must be located in the working directory or its subdirectories + --timeout int The timeout duration for kusion apply command, measured in second(s) + --watch After creating/updating/deleting the requested object, watch for changes (default true) + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-get.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-get.md new file mode 100644 index 00000000..4d50b615 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-get.md @@ -0,0 +1,37 @@ +# kusion config get + +Get a config item + +### Synopsis + +This command gets the value of a specified kusion config item, where the config item must be registered. + +``` +kusion config get +``` + +### Examples + +``` + # Get a config item + kusion config get backends.current +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-list.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-list.md new file mode 100644 index 00000000..95d9620c --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-list.md @@ -0,0 +1,37 @@ +# kusion config list + +List all config items + +### Synopsis + +This command lists all the kusion config items and their values. + +``` +kusion config list +``` + +### Examples + +``` + # List config items + kusion config list +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-set.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-set.md new file mode 100644 index 00000000..cf3d3213 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-set.md @@ -0,0 +1,40 @@ +# kusion config set + +Set a config item + +### Synopsis + +This command sets the value of a specified kusion config item, where the config item must be registered, and the value must be in valid type. + +``` +kusion config set +``` + +### Examples + +``` + # Set a config item with string type value + kusion config set backends.current s3-pre + + # Set a config item with struct or map type value + kusion config set backends.s3-pre.configs '{"bucket":"kusion"}' +``` + +### Options + +``` + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-unset.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-unset.md new file mode 100644 index 00000000..538668fe --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config-unset.md @@ -0,0 +1,37 @@ +# kusion config unset + +Unset a config item + +### Synopsis + +This command unsets a specified kusion config item, where the config item must be registered. + +``` +kusion config unset +``` + +### Examples + +``` + # Unset a config item + kusion config unset backends.s3-pre.configs.bucket +``` + +### Options + +``` + -h, --help help for unset +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion config](kusion-config.md) - Interact with the Kusion config + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config.md new file mode 100644 index 00000000..e8264ee8 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-config.md @@ -0,0 +1,34 @@ +# kusion config + +Interact with the Kusion config + +### Synopsis + +Config contains the operation of Kusion configurations. + +``` +kusion config [flags] +``` + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion config get](kusion-config-get.md) - Get a config item +* [kusion config list](kusion-config-list.md) - List all config items +* [kusion config set](kusion-config-set.md) - Set a config item +* [kusion config unset](kusion-config-unset.md) - Unset a config item + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-destroy.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-destroy.md new file mode 100644 index 00000000..6881a3ce --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-destroy.md @@ -0,0 +1,47 @@ +# kusion destroy + +Destroy resources within the stack. + +### Synopsis + +Destroy resources within the stack. + + Please note that the destroy command does NOT perform resource version checks. Therefore, if someone submits an update to a resource at the same time you execute a destroy command, their update will be lost along with the rest of the resource. + +``` +kusion destroy [flags] +``` + +### Examples + +``` + # Delete resources of current stack + kusion destroy +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details after previewing it + -h, --help help for destroy + --no-style no-style sets to RawOutput mode and disables all of styling + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. + -y, --yes Automatically approve and perform the update after previewing it +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-generate.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-generate.md new file mode 100644 index 00000000..3514b3ee --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-generate.md @@ -0,0 +1,53 @@ +# kusion generate + +Generate and print the resulting Spec resources of target Stack + +### Synopsis + +This command generates Spec resources with given values, then write the resulting Spec resources to specific output file or stdout. + + The nearest parent folder containing a stack.yaml file is loaded from the project in the current directory. 
+ +``` +kusion generate [flags] +``` + +### Examples + +``` + # Generate and write Spec resources to specific output file + kusion generate -o /tmp/spec.yaml + + # Generate spec with custom workspace + kusion generate -o /tmp/spec.yaml --workspace dev + + # Generate spec with specified arguments + kusion generate -D name=test -D age=18 +``` + +### Options + +``` + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for generate + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string File to write generated Spec resources to + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-init.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-init.md new file mode 100644 index 00000000..2d24082f --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-init.md @@ -0,0 +1,46 @@ +# kusion init + +Initialize the scaffolding for a demo project + +### Synopsis + +This command initializes the scaffolding for a demo project with the name of the current directory to help users quickly get started. + + Note that target directory needs to be an empty directory. + +``` +kusion init [flags] +``` + +### Examples + +``` + # Initialize a demo project with the name of the current directory + mkdir quickstart && cd quickstart + kusion init + + # Initialize the demo project in a different target directory + kusion init --target projects/my-demo-project +``` + +### Options + +``` + -h, --help help for init + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +Find more information at: https://www.kusionstack.io + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-add.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-add.md new file mode 100644 index 00000000..a0530340 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-add.md @@ -0,0 +1,39 @@ +# kusion mod add + +Add a module from a workspace + +``` +kusion mod add MODULE_NAME [--workspace WORKSPACE] [flags] +``` + +### Examples + +``` + # Add a kusion module to the kcl.mod from the current workspace to use it in AppConfiguration + kusion mod add my-module + + # Add a module to the kcl.mod from a specified workspace to use it in AppConfiguration + kusion mod add my-module --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for add + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-init.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-init.md new file mode 100644 index 00000000..c94b9653 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-init.md @@ -0,0 +1,40 @@ +# kusion mod init + +Create a kusion module along with common files and directories in the current directory + +``` +kusion mod init MODULE_NAME PATH [flags] +``` + +### Examples + +``` + # Create a kusion module template in the current directory + kusion mod init my-module + + # Init a kusion module at the specified Path + kusion mod init my-module ./modules + + # Init a module from a remote git template repository + kusion mod init my-module --template https://github.com// +``` + +### Options + +``` + -h, --help help for init + --template string Initialize with specified template +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-list.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-list.md new file mode 100644 index 00000000..0ea4e426 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-list.md @@ -0,0 +1,39 @@ +# kusion mod list + +List kusion modules in a workspace + +``` +kusion mod list [--workspace WORKSPACE] [flags] +``` + +### Examples + +``` + # List kusion modules in the current workspace + kusion mod list + + # List modules in a specified workspace + kusion mod list --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for list + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-push.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-push.md new file mode 100644 index 00000000..2f0d0a72 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod-push.md @@ -0,0 +1,62 @@ +# kusion mod push + +Push a module to OCI registry + +### Synopsis + +The push command packages the module as an OCI artifact and pushes it to the OCI registry using the version as the image tag. + +``` +kusion mod push MODULE_PATH OCI_REPOSITORY_URL [--creds CREDENTIALS] +``` + +### Examples + +``` + # Push a module of current OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --creds + + # Push a module of specific OS arch to an OCI Registry using a token + kusion mod push /path/to/my-module oci://ghcr.io/org --os-arch=darwin/arm64 --creds + + # Push a module to an OCI Registry using a credentials in : format. + kusion mod push /path/to/my-module oci://ghcr.io/org --creds : + + # Push a release candidate without marking it as the latest stable + kusion mod push /path/to/my-module oci://ghcr.io/org --latest=false + + # Push a module with custom OCI annotations + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --annotation='org.opencontainers.image.documentation=https://app.org/docs' + + # Push and sign a module with Cosign (the cosign binary must be present in PATH) + export COSIGN_PASSWORD=password + kusion mod push /path/to/my-module oci://ghcr.io/org \ + --sign=cosign --cosign-key=/path/to/cosign.key +``` + +### Options + +``` + -a, --annotations strings Set custom OCI annotations in '=' format. + --cosign-key string The Cosign private key for signing the module. + --creds string The credentials token for the OCI registry in or : format. 
+ -h, --help help for push + --insecure-registry If true, allows connecting to a OCI registry without TLS or with self-signed certificates. + --latest Tags the current version as the latest stable module version. (default true) + --os-arch string The os arch of the module e.g. 'darwin/arm64', 'linux/amd64'. + --sign string Signs the module with the specified provider. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion mod](kusion-mod.md) - Manage Kusion modules + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod.md new file mode 100644 index 00000000..4ba93969 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-mod.md @@ -0,0 +1,36 @@ +# kusion mod + +Manage Kusion modules + +### Synopsis + +Commands for managing Kusion modules. + + These commands help you manage the lifecycle of Kusion modules. + +``` +kusion mod +``` + +### Options + +``` + -h, --help help for mod +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion mod add](kusion-mod-add.md) - Add a module from a workspace +* [kusion mod init](kusion-mod-init.md) - Create a kusion module along with common files and directories in the current directory +* [kusion mod list](kusion-mod-list.md) - List kusion modules in a workspace +* [kusion mod push](kusion-mod-push.md) - Push a module to OCI registry + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-options.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-options.md new file mode 100644 index 00000000..d7fbc13d --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-options.md @@ -0,0 +1,37 @@ +# kusion options + +Print the list of flags inherited by all commands + +### Synopsis + +Print the list of flags inherited by all commands + +``` +kusion options [flags] +``` + +### Examples + +``` + # Print flags inherited by all commands + kubectl options +``` + +### Options + +``` + -h, --help help for options +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-preview.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-preview.md new file mode 100644 index 00000000..61103f19 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-preview.md @@ -0,0 +1,64 @@ +# kusion preview + +Preview a series of resource changes within the stack + +### Synopsis + +Preview a series of resource changes within the stack. + + Create, update or delete resources according to the intent described in the stack. By default, Kusion will generate an execution preview and present it for your approval before taking any action. + +``` +kusion preview [flags] +``` + +### Examples + +``` + # Preview with specified work directory + kusion preview -w /path/to/workdir + + # Preview with specified arguments + kusion preview -D name=test -D age=18 + + # Preview with specifying spec file + kusion preview --spec-file spec.yaml + + # Preview with ignored fields + kusion preview --ignore-fields="metadata.generation,metadata.managedFields" + + # Preview with json format result + kusion preview -o json + + # Preview without output style and color + kusion preview --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all preview details, combined use with flag --detail + -D, --argument stringArray Specify arguments on the command line + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -d, --detail Automatically show preview details with interactive options (default true) + -h, --help help for preview + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + -o, --output string Specify the output format + --spec-file string Specify the spec file path as input, and the spec file must be located in the working directory or its subdirectories + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project-create.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project-create.md new file mode 100644 index 00000000..5f914717 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project-create.md @@ -0,0 +1,44 @@ +# kusion project create + +Create a new project + +### Synopsis + +This command creates a new project.yaml file under the target directory which by default is the current working directory. + + Note that the target directory needs to be an empty directory. 
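+
+ As an illustration only, the generated project.yaml is expected to be a small YAML file that names the project. The sketch below assumes `name` is its only field:
+
+```yaml
+# project.yaml (sketch; the exact fields are an assumption)
+name: my-project
+```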
+ +``` +kusion project create +``` + +### Examples + +``` + # Create a new project with the name of the current working directory + mkdir my-project && cd my-project + kusion project create + + # Create a new project in a specified target directory + kusion project create --target /dir/to/projects/my-project +``` + +### Options + +``` + -h, --help help for create + -t, --target string specify the target directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project-list.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project-list.md new file mode 100644 index 00000000..c42f05e4 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project-list.md @@ -0,0 +1,51 @@ +# kusion project list + +List the applied projects + +### Synopsis + +This command lists all the applied projects in the target backend and target workspace. + + By default list the projects in the current backend and current workspace. + +``` +kusion project list +``` + +### Examples + +``` + # List the applied project in the current backend and current workspace + kusion project list + + # List the applied project in a specified backend and current workspace + kusion project list --backend default + + # List the applied project in a specified backend and specified workspaces + kusion project list --backend default --workspace dev,default + + # List the applied project in a specified backend and all the workspaces + kusion project list --backend default --all +``` + +### Options + +``` + -a, --all List all the projects in all the workspaces + --backend string The backend to use, supports 'local', 'oss' and 's3' + -h, --help help for list + --workspace strings The name of the target workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion project](kusion-project.md) - Project is a folder that contains a project.yaml file and is linked to a Git repository + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project.md new file mode 100644 index 00000000..6866aedf --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-project.md @@ -0,0 +1,34 @@ +# kusion project + +Project is a folder that contains a project.yaml file and is linked to a Git repository + +### Synopsis + +Project in Kusion is defined as any folder that contains a project.yaml file and is linked to a Git repository. + + Project organizes logical configurations for internal components to orchestrate the application and assembles them to suit different roles, such as developers and platform engineers. 
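+
+ For orientation, a typical project layout might look like the sketch below. The stack file names follow those listed under `kusion stack create` (stack.yaml, kcl.mod, main.k); the overall layout is otherwise an assumption:
+
+```
+my-project
+├── project.yaml        # marks the folder as a Kusion project
+└── dev                 # a stack, e.g. created with `kusion stack create dev`
+    ├── stack.yaml
+    ├── kcl.mod
+    └── main.k
+```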
+ +``` +kusion project [flags] +``` + +### Options + +``` + -h, --help help for project +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion project create](kusion-project-create.md) - Create a new project +* [kusion project list](kusion-project-list.md) - List the applied projects + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-list.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-list.md new file mode 100644 index 00000000..36aafa28 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-list.md @@ -0,0 +1,45 @@ +# kusion release list + +List all releases of the current stack + +### Synopsis + +List all releases of the current stack. + + This command displays information about all releases of the current stack in the current or a specified workspace, including their revision, phase, and creation time. + +``` +kusion release list [flags] +``` + +### Examples + +``` + # List all releases of the current stack in current workspace + kusion release list + + # List all releases of the current stack in a specified workspace + kusion release list --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for list + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion release](kusion-release.md) - Manage Kusion release files + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-show.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-show.md new file mode 100644 index 00000000..b5773b68 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-show.md @@ -0,0 +1,56 @@ +# kusion release show + +Show details of a release of the current or specified stack + +### Synopsis + +Show details of a release of the current or specified stack. 
+ + This command displays detailed information about a release of the current project in the current or a specified workspace + +``` +kusion release show [flags] +``` + +### Examples + +``` + # Show details of the latest release of the current project in the current workspace + kusion release show + + # Show details of a specific release of the current project in the current workspace + kusion release show --revision=1 + + # Show details of a specific release of the specified project in the specified workspace + kusion release show --revision=1 --project=hoangndst --workspace=dev + + # Show details of the latest release with specified backend + kusion release show --backend=local + + # Show details of the latest release with specified output format + kusion release show --output=json +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3' + -h, --help help for show + -o, --output string Specify the output format + --project string The project name + --revision uint The revision number of the release + --workspace string The workspace name +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion release](kusion-release.md) - Manage Kusion release files + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-unlock.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-unlock.md new file mode 100644 index 00000000..471fbafc --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release-unlock.md @@ -0,0 +1,47 @@ +# kusion release unlock + +Unlock the latest release file of the current stack + +### Synopsis + +Unlock the latest release file of the current stack. + + The phase of the latest release file of the current stack in the current or a specified workspace will be set to 'failed' if it was in the stages of 'generating', 'previewing', 'applying' or 'destroying'. + + Please note that using the 'kusion release unlock' command may cause unexpected concurrent read-write issues with release files, so please use it with caution. + +``` +kusion release unlock [flags] +``` + +### Examples + +``` + # Unlock the latest release file of the current stack in the current workspace. + kusion release unlock + + # Unlock the latest release file of the current stack in a specified workspace. + kusion release unlock --workspace=dev +``` + +### Options + +``` + --backend string The backend to use, supports 'local', 'oss' and 's3'. + -h, --help help for unlock + -w, --workdir string The work directory to run Kusion CLI. + --workspace string The name of target workspace to operate in. +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion release](kusion-release.md) - Manage Kusion release files + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release.md new file mode 100644 index 00000000..47557d34 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-release.md @@ -0,0 +1,35 @@ +# kusion release + +Manage Kusion release files + +### Synopsis + +Commands for managing Kusion release files. + + These commands help you manage the lifecycle of Kusion release files. + +``` +kusion release +``` + +### Options + +``` + -h, --help help for release +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion release list](kusion-release-list.md) - List all releases of the current stack +* [kusion release show](kusion-release-show.md) - Show details of a release of the current or specified stack +* [kusion release unlock](kusion-release-unlock.md) - Unlock the latest release file of the current stack + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-resource-graph.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-resource-graph.md new file mode 100644 index 00000000..d222e3c9 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-resource-graph.md @@ -0,0 +1,54 @@ +# kusion resource graph + +Display a graph of all the resources' information of the target project and target workspaces + +### Synopsis + +Display information of all the resources of a project. + + This command displays information of all the resources of a project in the current or specified workspaces. + +``` +kusion resource graph [flags] +``` + +### Examples + +``` + # Display information of all the resources of a project in the current workspace. + kusion resource graph --project quickstart + + # Display information of all the resources of a project in specified workspaces. + kusion resource graph --project quickstart --workspace=dev,default + + # Display information of all the resource of a project in all the workspaces that has been deployed. + kusion resource graph --project quickstart --all + kusion resource graph --project quickstart -a + + # Display information of all the resource of a project with in specified workspaces with json format result. + kusion resource graph --project quickstart --workspace dev -o json +``` + +### Options + +``` + -a, --all Display all the resources of all the workspaces + --backend string The backend to use, supports 'local', 'oss' and 's3' + -h, --help help for graph + -o, --output string Specify the output format, json only + --project string The name of the target project + --workspace strings The name of the target workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion resource](kusion-resource.md) - Observe Kusion resource information + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-resource.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-resource.md new file mode 100644 index 00000000..9ac6ba50 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-resource.md @@ -0,0 +1,33 @@ +# kusion resource + +Observe Kusion resource information + +### Synopsis + +Commands for observing Kusion resources. + + These commands help you observe the information of Kusion resources within a project. + +``` +kusion resource +``` + +### Options + +``` + -h, --help help for resource +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion resource graph](kusion-resource-graph.md) - Display a graph of all the resources' information of the target project and target workspaces + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-stack-create.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-stack-create.md new file mode 100644 index 00000000..ec0d0fef --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-stack-create.md @@ -0,0 +1,49 @@ +# kusion stack create + +Create a new stack + +### Synopsis + +This command creates a new stack under the target directory which by default is the current working directory. + + The stack folder to be created contains 'stack.yaml', 'kcl.mod' and 'main.k' with the specified values. + + Note that the target directory needs to be a valid project directory with project.yaml file + +``` +kusion stack create +``` + +### Examples + +``` + # Create a new stack at current project directory + kusion stack create dev + + # Create a new stack in a specified target project directory + kusion stack create dev --target /dir/to/projects/my-project + + # Create a new stack copied from the referenced stack under the target project directory + kusion stack create prod --copy-from dev +``` + +### Options + +``` + --copy-from string specify the referenced stack path to copy from + -h, --help help for create + -t, --target string specify the target project directory +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion stack](kusion-stack.md) - Stack is a folder that contains a stack.yaml file within the corresponding project directory + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-stack.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-stack.md new file mode 100644 index 00000000..41fcbef2 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-stack.md @@ -0,0 +1,33 @@ +# kusion stack + +Stack is a folder that contains a stack.yaml file within the corresponding project directory + +### Synopsis + +Stack in Kusion is defined as any folder that contains a stack.yaml file within the corresponding project directory. + + A stack provides a mechanism to isolate multiple deployments of the same application, serving with the target workspace to which an application will be deployed. + +``` +kusion stack [flags] +``` + +### Options + +``` + -h, --help help for stack +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion stack create](kusion-stack-create.md) - Create a new stack + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-version.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-version.md new file mode 100644 index 00000000..d2011a26 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-version.md @@ -0,0 +1,38 @@ +# kusion version + +Print the Kusion version information for the current context + +### Synopsis + +Print the Kusion version information for the current context + +``` +kusion version [flags] +``` + +### Examples + +``` + # Print the Kusion version + kusion version +``` + +### Options + +``` + -h, --help help for version + -o, --output string Output format. Only json format is supported for now +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-create.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-create.md new file mode 100644 index 00000000..473570e5 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-create.md @@ -0,0 +1,46 @@ +# kusion workspace create + +Create a new workspace + +### Synopsis + +This command creates a workspace with specified name and configuration file, where the file must be in the YAML format. 
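+
+ As a rough illustration, the configuration file is a YAML document whose keys depend on the modules in use. The sketch below assumes the `modules`/`default` layout from the workspace configs reference, with `databaseName` taken from the mysql module docs:
+
+```yaml
+# dev.yaml (sketch; consult the workspace configs reference for the real keys)
+modules:
+  mysql:
+    default:
+      databaseName: my-db
+```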
+ +``` +kusion workspace create +``` + +### Examples + +``` + # Create a workspace + kusion workspace create dev -f dev.yaml + + # Create a workspace and set as current + kusion workspace create dev -f dev.yaml --current + + # Create a workspace in a specified backend + kusion workspace create prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-delete.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-delete.md new file mode 100644 index 00000000..a465fe4a --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-delete.md @@ -0,0 +1,44 @@ +# kusion workspace delete + +Delete a workspace + +### Synopsis + +This command deletes the current or a specified workspace. + +``` +kusion workspace delete +``` + +### Examples + +``` + # Delete the current workspace + kusion workspace delete + + # Delete a specified workspace + kusion workspace delete dev + + # Delete a specified workspace in a specified backend + kusion workspace delete prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for delete +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-list.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-list.md new file mode 100644 index 00000000..97787e16 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-list.md @@ -0,0 +1,41 @@ +# kusion workspace list + +List all workspace names + +### Synopsis + +This command list the names of all workspaces. + +``` +kusion workspace list +``` + +### Examples + +``` + # List all workspace names + kusion workspace list + + # List all workspace names in a specified backend + kusion workspace list --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-show.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-show.md new file mode 100644 index 00000000..dab0bcbd --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-show.md @@ -0,0 +1,44 @@ +# kusion workspace show + +Show a workspace configuration + +### Synopsis + +This command gets the current or a specified workspace configuration. + +``` +kusion workspace show +``` + +### Examples + +``` + # Show current workspace configuration + kusion workspace show + + # Show a specified workspace configuration + kusion workspace show dev + + # Show a specified workspace in a specified backend + kusion workspace show prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for show +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-switch.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-switch.md new file mode 100644 index 00000000..111127a4 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-switch.md @@ -0,0 +1,41 @@ +# kusion workspace switch + +Switch the current workspace + +### Synopsis + +This command switches the workspace, where the workspace must be created. + +``` +kusion workspace switch +``` + +### Examples + +``` + # Switch the current workspace + kusion workspace switch dev + + # Switch the current workspace in a specified backend + kusion workspace switch prod --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + -h, --help help for switch +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-update.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-update.md new file mode 100644 index 00000000..f868cc86 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace-update.md @@ -0,0 +1,46 @@ +# kusion workspace update + +Update a workspace configuration + +### Synopsis + +This command updates a workspace configuration with specified configuration file, where the file must be in the YAML format. + +``` +kusion workspace update +``` + +### Examples + +``` + # Update the current workspace + kusion workspace update -f dev.yaml + + # Update a specified workspace and set as current + kusion workspace update dev -f dev.yaml --current + + # Update a specified workspace in a specified backend + kusion workspace update prod -f prod.yaml --backend oss-prod +``` + +### Options + +``` + --backend string the backend name + --current set the creating workspace as current + -f, --file string the path of workspace configuration file + -h, --help help for update +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion workspace](kusion-workspace.md) - Workspace is a logical concept representing a target that stacks will be deployed to + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace.md b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace.md new file mode 100644 index 00000000..a3eba89d --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/1-commands/kusion-workspace.md @@ -0,0 +1,38 @@ +# kusion workspace + +Workspace is a logical concept representing a target that stacks will be deployed to + +### Synopsis + +Workspace is a logical concept representing a target that stacks will be deployed to. + + Workspace is managed by platform engineers, which contains a set of configurations that application developers do not want or should not concern, and is reused by multiple stacks belonging to different projects. + +``` +kusion workspace [flags] +``` + +### Options + +``` + -h, --help help for workspace +``` + +### Options inherited from parent commands + +``` + --profile string Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default "none") + --profile-output string Name of the file to write the profile to (default "profile.pprof") +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the Platform Orchestrator of Internal Developer Platform +* [kusion workspace create](kusion-workspace-create.md) - Create a new workspace +* [kusion workspace delete](kusion-workspace-delete.md) - Delete a workspace +* [kusion workspace list](kusion-workspace-list.md) - List all workspace names +* [kusion workspace show](kusion-workspace-show.md) - Show a workspace configuration +* [kusion workspace switch](kusion-workspace-switch.md) - Switch the current workspace +* [kusion workspace update](kusion-workspace-update.md) - Update a workspace configuration + +###### Auto generated by spf13/cobra on 26-Sep-2024 diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/_category_.json b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/_category_.json new file mode 100644 index 00000000..0df3bade --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Schemas" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/app-configuration.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/app-configuration.md new file mode 100644 index 00000000..6808bee7 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/app-configuration.md @@ -0,0 +1,35 @@ +# appconfiguration + +## Schema AppConfiguration + +AppConfiguration is a developer-centric definition that describes how to run an Application.
This application model builds upon a decade of experience at AntGroup running a super-large-scale
internal developer platform, combined with best-of-breed ideas and practices from the community. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**accessories**|{str:any}|Accessories defines a collection of accessories that will be attached to the workload.|{}| +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to resources.|{}| +|**labels**|{str:str}|Labels can be used to attach arbitrary metadata as key-value pairs to resources.|{}| +|**workload** `required`|[service.Service](workload/service#schema-service) \| [wl.Job](workload/job#schema-job) |Workload defines how to run your application code. Currently supported workload profiles
include Service and Job.|N/A| + +### Examples +```python +# Instantiate an App with a long-running service and its image is "nginx:v1" + +import kam as ac +import kam.workload as wl +import kam.workload.container as c + +helloworld : ac.AppConfiguration { + workload: wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/database/mysql.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/database/mysql.md new file mode 100644 index 00000000..8f6135bb --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/database/mysql.md @@ -0,0 +1,39 @@ +# mysql + +## Schema MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider
managed mysql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" \| "cloud"|Type defines whether the mysql database is deployed locally or provided by
cloud vendor.|| +|**version** `required`|str|Version defines the mysql version to use.|| + +### Examples +```python +# Instantiate a local mysql database with version of 8.0. + +import mysql + +accessories: { + "mysql": mysql.MySQL { + type: "local" + version: "8.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of mysql](../../2-workspace-configs/database/mysql.md), and Kusion will automatically concatenate the ``, ``, `` and `mysql` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`. diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/database/postgres.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/database/postgres.md new file mode 100644 index 00000000..ad8cbb7e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/database/postgres.md @@ -0,0 +1,39 @@ +# postgres + +## Schema PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider
managed postgresql database instance for the workload. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**type** `required`|"local" \| "cloud"|Type defines whether the postgresql database is deployed locally or provided by
cloud vendor.|| +|**version** `required`|str|Version defines the postgres version to use.|| + +### Examples +```python +#Instantiate a local postgresql database with image version of 14.0. + +import postgres as postgres + +accessories: { + "postgres": postgres.PostgreSQL { + type: "local" + version: "14.0" + } +} +``` + + +### Credentials and Connectivity + +For sensitive information such as the **host**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB\_HOST\_`` | Host address for accessing the database instance | +| KUSION_DB\_USERNAME\_`` | Account username for accessing the database instance | +| KUSION_DB\_PASSWORD\_`` | Account password for accessing the database instance | + +The `databaseName` can be declared in [workspace configs of postgres](../../2-workspace-configs/database/postgres.md), and Kusion will automatically concatenate the ``, ``, `` and `postgres` with `-` if not specified. When injecting the credentials into containers' environment variables, Kusion will convert the `databaseName` to uppercase, and replace `-` with `_`. diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/inference/inference.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/inference/inference.md new file mode 100644 index 00000000..4cdb853a --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/inference/inference.md @@ -0,0 +1,50 @@ +# inference + +## Index + +- v1 + - [Inference](#inference) + +## Schemas + +### Inference + +Inference is a module schema consisting of model, framework and so on + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**framework** `required`|"Ollama" \| "KubeRay"|The framework or environment in which the model operates.|| +|**model** `required`|str|The model name to be used for inference.|| +|**num_ctx**|int|The size of the context window used to generate the next token.|2048| +|**num_predict**|int|Maximum number of tokens to predict when generating text.|128| +|**system**|str|The system message, which will be set in the template.|""| +|**temperature**|float|A parameter determines whether the model's output is more random and creative or more predictable.|0.8| +|**template**|str|The full prompt template, which will be sent to the model.|""| +|**top_k**|int|A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.|40| +|**top_p**|float|A higher value (e.g. 0.9) will give more diverse answers, while a lower value (e.g. 0.5) will be more conservative.|0.9| +#### Examples + +``` +import inference.v1.infer + +accessories: { + "inference@v0.1.0": infer.Inference { + model: "llama3" + framework: "Ollama" + + system: "You are Mario from super mario bros, acting as an assistant." 
+ template: "{{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant" + + top_k: 40 + top_p: 0.9 + temperature: 0.8 + + num_predict: 128 + num_ctx: 2048 + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/common.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/common.md new file mode 100644 index 00000000..8b649196 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/common.md @@ -0,0 +1,17 @@ +# common + +## Schema WorkloadBase + +WorkloadBase defines a set of attributes shared by different workload profiles, e.g. Service
and Job. You can inherit this Schema to reuse these common attributes. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[s.Secret](#schema-secret)}|Secrets can be used to store a small amount of sensitive data e.g. password, token.|| + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/container.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/container.md new file mode 100644 index 00000000..ce170fc6 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/container.md @@ -0,0 +1,63 @@ +# container + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter, 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md new file mode 100644 index 00000000..91123526 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/lifecycle/lifecycle.md @@ -0,0 +1,29 @@ +# lifecycle + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| + +### Examples +```python +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md new file mode 100644 index 00000000..64d709cd --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/container/probe/probe.md @@ -0,0 +1,92 @@ +# probe + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) \| [Http](#http) \| [Tcp](#tcp)|The action taken to determine the aliveness or health of a container|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| + +### Examples +```python +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| + +### Examples +```python +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| + +### Examples +```python +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| + +### Examples +```python +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md new file mode 100644 index 00000000..1f13bb85 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/internal/secret/secret.md @@ -0,0 +1,29 @@ +# secret + +## Schema Secret + +Secrets are used to provide data that is considered sensitive like passwords, API keys,
TLS certificates, tokens or other credentials. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md new file mode 100644 index 00000000..3e749af9 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/k8s_manifest/k8s_manifest.md @@ -0,0 +1,30 @@ +# k8s_manifest + +## Schema K8sManifest + +K8sManifest defines the paths of the YAML files, or the directories of the raw Kubernetes manifests, which will be jointly appended to the Resources of Spec. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**paths** `required`|[str]|The paths of the YAML files, or the directories of the raw Kubernetes manifests.|| + +### Examples + +``` +import k8s_manifest + +accessories: { + "k8s_manifest": k8s_manifest.K8sManifest { + paths: [ + # The path of a YAML file. + "/path/to/my/k8s_manifest.yaml", + # The path of a directory containing K8s manifests. + "/dir/to/my/k8s_manifests" + ] + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md new file mode 100644 index 00000000..bf2e551e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/monitoring/prometheus.md @@ -0,0 +1,24 @@ +# prometheus + +## Schema Prometheus + +Prometheus can be used to define monitoring requirements + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**path**|str|The path to scrape metrics from.|"/metrics"| +|**port**|str|The port to scrape metrics from. When using Prometheus operator, this needs to be the port NAME. Otherwise, this can be a port name or a number.|container ports when scraping pod (monitorType is pod) and service port when scraping service (monitorType is service)| + +### Examples +```python +import monitoring as m + +"monitoring": m.Prometheus { + path: "/metrics" + port: "web" +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/network/network.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/network/network.md new file mode 100644 index 00000000..daa33121 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/network/network.md @@ -0,0 +1,51 @@ +# network + +## Schema Network + +Network defines the exposed port of Service, which can be used to describe how the Service
get accessed. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**ports**|[[Port](#schema-port)]|The list of ports which the Workload should get exposed.|| + +### Examples +```python +import network as n + +"network": n.Network { + ports: [ + n.Port { + port: 80 + public: True + } + ] +} +``` + +## Schema Port + +Port defines the exposed port of Workload, which can be used to describe how the Workload get accessed. + +| name | type | description | default value | +| --- | --- | --- | --- | +|**port** `required`|int|The exposed port of the Workload.|80| +|**protocol** `required`|"TCP" | "UDP"|The protocol to access the port.|"TCP"| +|**public** `required`|bool|Public defines whether the port can be accessed through Internet.|False| +|**targetPort**|int|The backend container port. If empty, set it the same as the port.|| + +### Examples + +```python +import network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md new file mode 100644 index 00000000..8313090a --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/opsrule/opsrule.md @@ -0,0 +1,35 @@ +# opsrule + +## Schema OpsRule + +OpsRule describes operation rules for various Day-2 Operations. Once declared, these
operation rules will be checked before any Day-2 operations. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**maxUnavailable**|int \| str|The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|"25%"| + +### Examples +```python +import opsrule as o +import kam.v1.app_configuration as ac +import kam.v1.workload as wl +import kam.v1.workload.container as c + +helloworld : ac.AppConfiguration { + workload: wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + } + accessories: { + "opsrule" : o.OpsRule { + maxUnavailable: "30%" + } + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/workload/job.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/workload/job.md new file mode 100644 index 00000000..52194488 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/workload/job.md @@ -0,0 +1,251 @@ +# job + +## Schemas +- [Job](#schema-job) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Job + +Job is a kind of workload profile that describes how to run your application code. This
is typically used for tasks that take from a few seconds to a few days to complete. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:[Container](../internal/container#schema-container)}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**schedule** `required`|str|The scheduling strategy in Cron format. More info: https://en.wikipedia.org/wiki/Cron.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. passwords and tokens.|| + +### Examples +```python +# Instantiate a job with the busybox image that runs every hour + +import kam.workload as wl +import kam.workload.container as c + +echoJob : wl.Job { + containers: { + "busybox": c.Container{ + image: "busybox:1.28" + command: ["/bin/sh", "-c", "echo hello"] + } + } + schedule: "0 * * * *" +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter, 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile; otherwise, the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile; otherwise, the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) | [Http](#http) | [Tcp](#tcp)|The action taken to determine whether a container is alive or healthy|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds to wait before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/workload/service.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/workload/service.md new file mode 100644 index 00000000..8dc74ccf --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/1-developer-schemas/workload/service.md @@ -0,0 +1,248 @@ +# service + +## Schemas +- [Service](#schema-service) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Service + +Service is a kind of workload profile that describes how to run your application code. This
is typically used for long-running web applications that should "never" go down, and handle
short-lived latency-sensitive web requests, or events. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**annotations**|{str:str}|Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|| +|**containers** `required`|{str:[Container](#schema-container)}|Containers defines the templates of containers to be run.
More info: https://kubernetes.io/docs/concepts/containers|| +|**labels**|{str:str}|Labels are key/value pairs that are attached to the workload.|| +|**replicas**|int|Number of container replicas based on this configuration that should be run.|| +|**secrets**|{str:[Secret](../internal/secret/secret.md#schema-secret)}|Secrets can be used to store small amounts of sensitive data, e.g. passwords and tokens.|| + +### Examples +```python +# Instantiate a long-running service with the image "nginx:v1" + +import kam.workload as wl +import kam.workload.container as c + +nginxSvc : wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } +} +``` + +### Base Schema +[WorkloadBase](../internal/common#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter, 1 or more containers can be created from each template. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**args**|[str]|Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile; otherwise, the Docker
image's CMD is used if this is not provided.|| +|**command**|[str]|Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile; otherwise, the Docker
image's ENTRYPOINT is used if this is not provided.|| +|**dirs**|{str:str}|Collection of volumes mounted into the container's filesystem.
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|| +|**env**|{str:str}|List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|| +|**files**|{str:[FileSpec](#filespec)}|List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|| +|**image** `required`|str|Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|| +|**lifecycle**|[lc.Lifecycle](../internal/container/lifecycle/lifecycle.md#schema-lifecycle)|Lifecycle refers to actions that the management system should take in response to container lifecycle events.|| +|**livenessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|| +|**readinessProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|ReadinessProbe indicates whether an application is available to handle requests.|| +|**resources**|{str:str}|Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|| +|**startupProbe**|[p.Probe](../internal/container/probe/probe.md#schema-probe)|StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|| +|**workingDir**|str|The working directory of the running process defined in entrypoint.
Default container runtime will be used if this is not specified.|| + +### Examples +```python +import kam.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**content**|str|File content in plain text.|| +|**contentFrom**|str|Source for the file content, reference to a secret of configmap value.|| +|**mode** `required`|str|Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|"0644"| + +### Examples +```python +import kam.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +### Schema Lifecycle + +Lifecycle describes actions that the management system should take in response to container lifecycle events. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**postStart**| | |The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +|**preStop**| | |The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks|| +#### Examples + +``` +import kam.workload.container.probe as p +import kam.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +### Schema Exec + +Exec describes a "run in container" action. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**command** `required`|[str]|The command line to execute inside the container.|| +#### Examples + +``` +import kam.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +### Schema Http + +Http describes an action based on HTTP Get requests. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**headers**|{str:str}|Collection of custom headers to set in the request|| +|**url** `required`|str|The full qualified url to send HTTP requests.|| +#### Examples + +``` +import kam.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +### Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**failureThreshold**|int|Minimum consecutive failures for the probe to be considered failed after having succeeded.|| +|**initialDelaySeconds**|int|The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +|**periodSeconds**|int|How often (in seconds) to perform the probe.|| +|**probeHandler** `required`|[Exec](#exec) | [Http](#http) | [Tcp](#tcp)|The action taken to determine whether a container is alive or healthy|| +|**successThreshold**|int|Minimum consecutive successes for the probe to be considered successful after having failed.|| +|**terminationGracePeriod**|int|Duration in seconds to wait before terminating gracefully upon probe failure.|| +|**timeoutSeconds**|int|The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|| +#### Examples + +``` +import kam.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +### Schema Tcp + +Tcp describes an action based on opening a socket. + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**url** `required`|str|The full qualified url to open a socket.|| +#### Examples + +``` +import kam.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**data**|{str:str}|Data contains the non-binary secret data in string form.|| +|**immutable**|bool|Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|| +|**params**|{str:str}|Collection of parameters used to facilitate programmatic handling of secret data.|| +|**type** `required`|"basic" | "token" | "opaque" | "certificate" | "external"|Type of secret, used to facilitate programmatic handling of secret data.|| + +### Examples +```python +import kam.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/_category_.json b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/_category_.json new file mode 100644 index 00000000..81444988 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Workspace Configs" +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/database/mysql.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/database/mysql.md new file mode 100644 index 00000000..66225f5b --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/database/mysql.md @@ -0,0 +1,52 @@ +# mysql + +## Module MySQL + +MySQL describes the attributes to locally deploy or create a cloud provider managed mysql database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the mysql database. |str|"root"|optional| +|**category**
Category specifies the edition of the mysql instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the mysql instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the workload connects to the cloud mysql instance via the cloud vendor's private network rather than the public network. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the mysql instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud mysql instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# MySQL workspace configs for AWS RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + privateRouting: false + databaseName: "my-mysql" +``` + +```yaml +# MySQL workspace configs for Alicloud RDS +modules: + mysql: + path: oci://ghcr.io/kusionstack/mysql + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: mysql.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + databaseName: "my-mysql" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/database/postgres.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/database/postgres.md new file mode 100644 index 00000000..aed20616 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/database/postgres.md @@ -0,0 +1,55 @@ +# postgres + +## Module PostgreSQL + +PostgreSQL describes the attributes to locally deploy or create a cloud provider managed postgres database instance for the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**cloud**
Cloud specifies the type of the cloud vendor. |"aws" \| "alicloud"|Undefined|**required**| +|**username**
Username specifies the operation account for the postgres database. |str|"root"|optional| +|**category**
Category specifies the edition of the postgres instance provided by the cloud vendor. |str|"Basic"|optional| +|**securityIPs**
SecurityIPs specifies the list of IP addresses allowed to access the postgres instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**privateRouting**
PrivateRouting specifies whether the workload connects to the cloud postgres instance via the cloud vendor's private network rather than the public network. |bool|true|optional| +|**size**
Size specifies the allocated storage size of the postgres instance. |int|10|optional| +|**subnetID**
SubnetID specifies the virtual subnet ID associated with the VPC that the cloud postgres instance will be created in. |str|Undefined|optional| +|**databaseName**
databaseName specifies the database name. |str|Undefined|optional| + +### Examples + +```yaml +# PostgreSQL workspace configs for AWS RDS +modules: + postgres: + path: oci://ghcr.io/kusionstack/postgres + version: 0.2.0 + configs: + default: + cloud: aws + size: 20 + instanceType: db.t3.micro + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` + +```yaml +# PostgreSQL workspace configs for Alicloud RDS +modules: + postgres: + path: oci://ghcr.io/kusionstack/postgres + version: 0.2.0 + configs: + default: + cloud: alicloud + size: 20 + instanceType: pg.n2.serverless.1c + category: serverless_basic + privateRouting: false + subnetID: [your-subnet-id] + securityIPs: + - 0.0.0.0/0 + databaseName: "my-postgres" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/inference/inference.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/inference/inference.md new file mode 100644 index 00000000..4cdb853a --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/inference/inference.md @@ -0,0 +1,50 @@ +# inference + +## Index + +- v1 + - [Inference](#inference) + +## Schemas + +### Inference + +Inference is a module schema consisting of model, framework and so on + +#### Attributes + +| name | type | description | default value | +| --- | --- | --- | --- | +|**framework** `required`|"Ollama" \| "KubeRay"|The framework or environment in which the model operates.|| +|**model** `required`|str|The model name to be used for inference.|| +|**num_ctx**|int|The size of the context window used to generate the next token.|2048| +|**num_predict**|int|Maximum number of tokens to predict when generating text.|128| +|**system**|str|The system message, which will be set in the template.|""| +|**temperature**|float|A parameter determines whether the model's output is more random and creative or more predictable.|0.8| +|**template**|str|The full prompt template, which will be sent to the model.|""| +|**top_k**|int|A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.|40| +|**top_p**|float|A higher value (e.g. 0.9) will give more diverse answers, while a lower value (e.g. 0.5) will be more conservative.|0.9| +#### Examples + +``` +import inference.v1.infer + +accessories: { + "inference@v0.1.0": infer.Inference { + model: "llama3" + framework: "Ollama" + + system: "You are Mario from super mario bros, acting as an assistant." + template: "{{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant" + + top_k: 40 + top_p: 0.9 + temperature: 0.8 + + num_predict: 128 + num_ctx: 2048 + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md new file mode 100644 index 00000000..ab960c65 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/k8s_manifest/k8s_manifest.md @@ -0,0 +1,25 @@ +# k8s_manifest + +## Module K8sManifest + +K8sManifest defines the paths of the YAML files, or the directories of the raw Kubernetes manifests, which will be jointly appended to the Resources of Spec. 
+ +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**paths**
The paths of the YAML files, or the directories of the raw Kubernetes manifests. |[str]|Undefined|**optional**| + +### Examples + +```yaml +modules: + k8s_manifest: + path: oci://ghcr.io/kusionstack/k8s_manifest + version: 0.1.0 + configs: + default: + paths: + - /path/to/k8s_manifest.yaml + - /dir/to/k8s_manifest/ +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md new file mode 100644 index 00000000..55628423 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/monitoring/prometheus.md @@ -0,0 +1,43 @@ +# monitoring + +`monitoring` can be used to define workspace-level monitoring configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**operatorMode**
Whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages.|true \| false|false|optional| +|**monitorType**
The kind of monitor to create. It only applies when operatorMode is set to True.|"Service" \| "Pod"|"Service"|optional| +|**interval**
The time interval at which Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping interval can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|30s|optional| +|**timeout**
The timeout when Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping timeout can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|15s|optional| +|**scheme**
The scheme to scrape metrics from. Possible values are http and https.|"http" \| "https"|http|optional| + +### Examples +```yaml +modules: + monitoring: + path: oci://ghcr.io/kusionstack/monitoring + version: 0.2.0 + configs: + default: + operatorMode: True + monitorType: Pod + scheme: http + interval: 30s + timeout: 15s + low_frequency: + operatorMode: False + interval: 2m + timeout: 1m + projectSelector: + - foo + - bar + high_frequency: + monitorType: Service + interval: 10s + timeout: 5s + projectSelector: + - helloworld + - wordpress + - prometheus-sample-app +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/networking/network.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/networking/network.md new file mode 100644 index 00000000..05609acc --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/networking/network.md @@ -0,0 +1,26 @@ +# network + +`network` can be used to define workspace-level networking configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
The specific cloud vendor that provides the load balancer.| "alicloud" \| "aws"|Undefined|**required**| +| **labels**
The attached labels of the port.|{str:str}|Undefined|optional| +| **annotations**
The attached annotations of the port.|{str:str}|Undefined|optional| + +### Examples + +```yaml +modules: + network: + path: oci://ghcr.io/kusionstack/network + version: 0.2.0 + configs: + default: + type: alicloud + labels: + kusionstack.io/control: "true" + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md new file mode 100644 index 00000000..0c3d29c1 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/opsrule/opsrule.md @@ -0,0 +1,22 @@ +# opsrule + +`opsrule` can be used to define workspace-level operational rule configurations. + +## Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**maxUnavailable**
The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|int \| str|Undefined|optional| + + +### Examples + +```yaml +modules: + opsrule: + path: oci://ghcr.io/kusionstack/opsrule + version: 0.2.0 + configs: + default: + maxUnavailable: "40%" +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/workload/job.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/workload/job.md new file mode 100644 index 00000000..da659136 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/workload/job.md @@ -0,0 +1,26 @@ +# job + +`job` can be used to define workspace-level job configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | + +### Examples +```yaml +modules: + job: + path: oci://ghcr.io/kusionstack/job + version: 0.1.0 + configs: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/workload/service.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/workload/service.md new file mode 100644 index 00000000..9c76a44c --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/2-workspace-configs/workload/service.md @@ -0,0 +1,28 @@ +# service + +`service` can be used to define workspace-level service configuration. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +| **replicas**
Number of container replicas based on this configuration that should be run. |int|2| optional | +| **labels**
Labels are key/value pairs that are attached to the workload. |{str: str}|Undefined| optional | +| **annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload. |{str: str}|Undefined| optional | +| **type**
Type represents the type of workload used by this Service. Currently, it supports several
types, including Deployment and CollaSet. |"Deployment" \| "CollaSet"| Deployment |**required**| + +### Examples +```yaml +modules: + service: + path: oci://ghcr.io/kusionstack/service + version: 0.2.0 + configs: + default: + replicas: 3 + labels: + label-key: label-value + annotations: + annotation-key: annotation-value + type: CollaSet +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/3-naming-conventions.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/3-naming-conventions.md new file mode 100644 index 00000000..ab7f668c --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/3-naming-conventions.md @@ -0,0 +1,34 @@ +--- +id: naming-conventions +sidebar_label: Resource Naming Conventions +--- + +# Resource Naming Conventions + +Kusion will automatically create Kubernetes or Terraform resources for the applications, many of which do not require users' awareness. This document will introduce the naming conventions for these related resources. + +## Kubernetes Resources + +Kusion adheres to specific rules when generating the Kubernetes resources for users' applications. The table below lists some common Kubernetes resource naming conventions. Note that `Namespace` can now be specified by users. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| Namespace | `` | v1:Namespace:wordpress-local-db | +| Deployment | ``-``-`` | apps/v1:Deployment:wordpress-local-db:wordpress-local-db-dev-wordpress | +| CronJob | ``-``-`` | batch/v1:CronJob:helloworld:helloworld-dev-helloworld | +| Service | ``-``-``-` or ` | v1:Service:helloworld:helloworld-dev-helloworld-public | + +## Terraform Resources + +Similarly, Kusion also adheres to specific naming conventions when generating the Terraform Resources. Some common resources are listed below. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| random_password | ``-`` | hashicorp:random:random_password:wordpress-db-mysql | +| aws_security_group | ``-`` | hashicorp:aws:aws_security_group:wordpress-db-mysql | +| aws_db_instance | `` | hashicorp:aws:aws_db_instance:wordpress-db | +| alicloud_db_instance | `` | aliyun:alicloud:alicloud_db_instance:wordpress-db | +| alicloud_db_connection | `` | aliyun:alicloud:alicloud_db_connection:wordpress | +| alicloud_rds_account | `` | aliyun:alicloud:alicloud_rds_account:wordpress | + +The `` is composed of two parts, one of which is the `key` of database declared in `AppConfiguration` and the other is the `suffix` declared in `workspace` configuration. Kusion will concatenate the database key and suffix, convert them to uppercase, and replace `-` with `_`. And the `` supported now includes `mysql` and `postgres`. 
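+To make these conventions concrete, the snippet below sketches what the resource IDs in a generated Spec could look like for a hypothetical project named `helloworld` with a `dev` stack and an application also named `helloworld`. The snippet is illustrative only and is not verbatim Kusion output; the exact Spec layout may differ.
+
+```yaml
+# Illustrative sketch only: IDs composed following the conventions above for a
+# hypothetical project/stack/app named helloworld/dev/helloworld.
+resources:
+  - id: v1:Namespace:helloworld                                   # the Namespace
+  - id: apps/v1:Deployment:helloworld:helloworld-dev-helloworld   # project-stack-app
+  - id: v1:Service:helloworld:helloworld-dev-helloworld-public    # public Service port
+```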
diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/_category_.json b/docs_versioned_docs/version-v0.13/6-reference/2-modules/_category_.json new file mode 100644 index 00000000..4dadaa75 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Kusion Modules" +} diff --git a/docs_versioned_docs/version-v0.13/6-reference/2-modules/index.md b/docs_versioned_docs/version-v0.13/6-reference/2-modules/index.md new file mode 100644 index 00000000..744892c4 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/2-modules/index.md @@ -0,0 +1,45 @@ +# Kusion Modules + +KusionStack presets application configuration models described by KCL, where the model is called **Kusion Model**. The GitHub repository [KusionStack/catalog](https://github.com/KusionStack/catalog) is used to store these models, which is known as **Kusion Model Library**. + +The original intention of designing Kusion Model is to enhance the efficiency and improve the experience of YAML users. Through the unified application model defined by code, abstract and encapsulate complex configuration items, omit repetitive and derivable configurations, and supplement with necessary verification logic. Only the necessary attributes get exposed, users get an out-of-the-box, easy-to-understand configuration interface, which reduces the difficulty and improves the reliability of the configuration work. + +Kusion Model Library currently provides the Kusion Model `AppConfiguration`. The design of `AppConfiguration` is developer-centric, based on Ant Group's decades of practice in building and managing hyperscale IDP (Internal Developer Platform), and the best practice of community. `AppConfiguration` describes the full lifecycle of an application. + +A simple example of using `AppConfiguration` to describe an application is as follows: + +```bash +wordpress: ac.AppConfiguration { + workload: service.Service { + containers: { + "wordpress": c.Container { + image: "wordpress:latest" + env: { + "WORDPRESS_DB_HOST": "secret://wordpress-db/hostAddress" + "WORDPRESS_DB_PASSWORD": "secret://wordpress-db/password" + } + resources: { + "cpu": "1" + "memory": "2Gi" + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "5.7" + size: 20 + instanceType: "mysql.n2.serverless.1c" + category: "serverless_basic" + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/6-reference/3-roadmap.md b/docs_versioned_docs/version-v0.13/6-reference/3-roadmap.md new file mode 100644 index 00000000..f411009e --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/3-roadmap.md @@ -0,0 +1,15 @@ +# Roadmap + +For a finer-grained view of our roadmap and what is being worked on for a release, please refer to the [Roadmap](https://github.com/orgs/KusionStack/projects/24) + +## Expand Kusion Module Ecosystem to meet more scenarios + +We plan to expand the range of Kusion modules. This includes not only cloud services but also popular cloud-native projects such as Prometheus, Backstage, Crossplane, etc. By leveraging the ecosystem of CNCF projects and Terraform providers, we aim to enrich the Kusion module ecosystem to meet more scenarios. + +## LLM (Large Language Models) Operation + +Kusion is essentially designed to tackle team collaboration challenges. The LLM operations also involve many collaborative tasks. 
We believe Kusion can boost the operational efficiency of LLM engineers in this setting as well. + +## Kusion Server + +Currently, Kusion is a command-line tool, which has its pros and cons. Through our discussions with community users, we‘ve discovered that some of them prefer a long-running service with a web portal. We’re planning to build this form of Kusion, and have already started developing some features. diff --git a/docs_versioned_docs/version-v0.13/6-reference/_category_.json b/docs_versioned_docs/version-v0.13/6-reference/_category_.json new file mode 100644 index 00000000..a3b4dd92 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/6-reference/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Reference" +} diff --git a/docs_versioned_docs/version-v0.13/7-faq/1-install-error.md b/docs_versioned_docs/version-v0.13/7-faq/1-install-error.md new file mode 100644 index 00000000..a0fde76a --- /dev/null +++ b/docs_versioned_docs/version-v0.13/7-faq/1-install-error.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +--- + +# Installation + +## 1. Could not find `libintl.dylib` + +This problem is that some tools depends on the `Gettext` library, but macOS does not have this library by default. You can try to solve it in the following ways: + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. `brew install gettext` +3. Make sure `libintl.8.dylib` exists in `/usr/local/opt/gettext/lib` directory +4. If brew is installed in another directory, the library can be created by copying it to the corresponding directory + +## 2. macOS system SSL related errors + +Openssl dylib library not found or SSL module is not available problem + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. Install openssl (version 1.1) via brew + +``` +brew install openssl@1.1 +``` diff --git a/docs_versioned_docs/version-v0.13/7-faq/2-kcl.md b/docs_versioned_docs/version-v0.13/7-faq/2-kcl.md new file mode 100644 index 00000000..596aa881 --- /dev/null +++ b/docs_versioned_docs/version-v0.13/7-faq/2-kcl.md @@ -0,0 +1,7 @@ +--- +sidebar_position: 2 +--- + +# KCL + +Visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl) for more documents. 
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.13/7-faq/_category_.json b/docs_versioned_docs/version-v0.13/7-faq/_category_.json new file mode 100644 index 00000000..7c4b229f --- /dev/null +++ b/docs_versioned_docs/version-v0.13/7-faq/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "FAQ" +} diff --git a/docs_versioned_docs/version-v0.9/concepts/_category_.json b/docs_versioned_docs/version-v0.9/concepts/_category_.json new file mode 100644 index 00000000..1b300a64 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 3 +} diff --git a/docs_versioned_docs/version-v0.9/concepts/appconfiguration.md b/docs_versioned_docs/version-v0.9/concepts/appconfiguration.md new file mode 100644 index 00000000..907bb196 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/concepts/appconfiguration.md @@ -0,0 +1,94 @@ +--- +sidebar_position: 4 +sidebar_label: AppConfiguration +--- + +# Declarative Application Configuration Model - AppConfiguration + +## Abstract + +Kusion consumes the declarative configuration that describes the application, and delivers intent to the target runtime including Kubernetes, clouds, or on-prem infrastructure. In order to do that, we need a declarative model that can best describe the intent for a given application. + +This design document elaborates on the core design considerations and a high-level architecture of the next-generation declarative application configuration model. The AppConfiguration model is expected to continuously iterate over time, with the purpose to better encapsulate the application configuration needs declaratively in its full lifecycle and reduce cognitive complexity as much as possible for the developers. + +AppConfiguration consists of multiple elements that each represents a significant portion in the application lifecycle, either the application workload itself, its dependencies, relevant deployment workflows or operational expectations. + +## Motivation + +In AntGroup, we have heavily invested in the efforts to enable application delivery on a massive scale. What we have observed in the past few years is a trend of continuous evolution of infrastructure complexity over time, as a result of the increasing business needs. + +We are motivated to find a new paradigm that highlights collaborations between different parts of the software organizations and enables self-service ability as much as possible to get to a mature level of standardization and efficiency in application delivery. + +The centerpiece of this paradigm is a consistent, comprehensive and declarative application model that captures the application needs in an intuitive and self-explanatory way. + +This paradigm will also require a core workflow that KusionStack advocates, an effort of retrofitting that workflow based on different organizational needs, a golden path that represents industry best practices, and a shift in organizational culture. We won't go into those details in this document. + +## Design + +### Core Principles + +#### Developer First + +The AppConfiguration model serves as the interface for the application developers. The model design should favor the perspective of application developers, rather than platform or infrastructure developers. 
The primary purpose of a unified and abstract application delivery model is to be able to define an application with concepts and semantics that are intuitive and easy for developers to understand, without the need for any advanced knowledge on infrastructure. The goal is to reduce the cognitive burden of application developers by hiding the increasing complexity of the underlying infrastructure, be it different clouds, runtimes, or product offerings. + +Developers should be able to describe an application as simple as "I want a database of type X and version Y to go along my application". + +#### Application-Centric + +In practice, the end-to-end delivery of a production-grade application typically involves more than provisioning the computing resource and bootstrapping the workload. It also includes managing a variety of dependent resources the application workload depends on, such as networking, storage, database, middleware, monitoring and alerting, etc. + +AppConfiguration proposes an application-centric approach, where the dependencies of an application can be kept together along with any operational (Day2) expectations. Everything the application needs to be production-available is captured inside a single, declarative source of truth centered around the AppConfiguration model. AppConfiguration should serve as the consistent and comprehensive abstraction of the application needs through its entire lifecycle. + +#### Platform Agnostic + +AppConfiguration should avoid locking into any specific tooling, technology stack or infrastructure providers. Kusion is built with the philosophy that benefits from an open and diverse ecosystem, where any infrastructure provider can be included in the form of plugins. + +The design of AppConfiguration should emphasize separation of concern between the roles that write application business logic and those that manage platform level configurations. In the context of using public cloud, the AppConfiguration model should support multi-cloud deployment out-of-the-box. The configurations should be "Write Once Deploy Everywhere". + +### Model Architecture + +The AppConfiguration model consolidates all the necessary components and their dependent accessories for the application deployment, along with any workflow, policy and operational requirements into one standardized, infrastructure-independent declarative specification. This declarative specification represents the intuitive user intent for the application, which drives a standardized and efficient application delivery and operation process in a hybrid environment. This enables application developers the ability to self-service based on concepts and semantics that are intuitive and self-explanatory. + +![appconfig.png](/img/docs/concept/appconfig.png) + +AppConfiguration consists of five core concepts, namely `Components`, `Topologies`, `Pipeline`, `PolicySets`, and `Dependency`. We will walk through these concepts one by one. + +### Core Concepts + +#### Component + +`Components` defines the foundation of any application configuration. Generally speaking, we believe that a comprehensive application description should at least consist of a core deployable workload that is frequently iterated and a collection of any other core services that the workload depends on, such as databases, caches or any other cloud services. + +Components are conceptually split into two categories, `Workload` and `Accessories`. The former revolves around the configuration for the computing resource. 
The latter represents any third-party runtime capabilities and operational requirements that the application needs. Each AppConfiguration consists of exactly one workload and any number of accessories. + +Simply put, we can define `Components` with the following expression: + +`Components = Workload + Accessories` + +The concept of `Components` and `Accessories` itself is implicit when [authoring the configuration files](../config-walkthrough/overview). You can define the workload and any type of accessories (such as database or monitoring) directly under the AppConfiguration model. + +From a collaboration perspective, platform developers and SREs are responsible for continuously adding any new schema (as abstractions for the underlying infrastructure) and implementations that can be used out-of-the-box. Application developers SREs should be able to leverage the corresponding schemas to cover the evolving application needs. This helps software organizations achieve separation of concern, so that different roles can focus on the subject matter they are an expert of. + +#### Pipeline + +In most of the cases, the platform is capable of providing a consistent application delivery process that can meet most application needs. In the case that an application warrants any customization in the delivery workflow, the `Pipeline` section in AppConfiguration provides an approach to extend the workflow as needed. + +A typical delivery workflow is made of several stages, each corresponds to some logic that needs to be executed, such as manual approval, data transfer, coordinated multi-cluster release, notification, etc. Implementation-wise, the execution of each stage should be carried out with a plugin, developed and managed by the platform owners. + +#### Topologies + +In reality, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets including different clouds, regions, availability zones or runtimes for availability/cost/regulation/performance or disaster recovery related reasons. The `Topologies` section in AppConfiguration highlights the different deployment targets in the application delivery and provides a single pane of glass that overlooks the entire deployment topology. + +#### PolicySets + +The `PolicySets` section is responsible for defining the set of rules and procedures that should be followed in the application delivery process. They generally represent the guidelines with the purpose of minimizing any technical, security or compliance risks. Some of examples include release strategies, risk management policies, and self-healing strategies. The collections of policies are expected to be managed as a joint effort from all the stakeholders, including platform owners, infrastructure owners, and security and compliance stakeholders. Some policy sets (usually security and compliance related) are expected to be mandatory. Some can be switched on and off by the application owner (self-healing strategy for instance) depending on their specific needs. + +#### Dependency + +In a production-scale environment, there are usually intricate dependencies between multiple applications. The `Dependency` section is responsible for describing the dependencies between multiple applications. + +## References + +1. Score - https://docs.score.dev/docs/overview/ +2. Acornfile - https://docs.acorn.io/authoring/overview +3. 
KubeVela - https://kubevela.io/docs/getting-started/core-concept diff --git a/docs_versioned_docs/version-v0.9/concepts/arch.md b/docs_versioned_docs/version-v0.9/concepts/arch.md new file mode 100644 index 00000000..86e70903 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/concepts/arch.md @@ -0,0 +1,14 @@ +--- +sidebar_position: 1 +--- + +# Architecture +![](https://raw.githubusercontent.com/KusionStack/kusion/main/docs/workflow.png) + + +KusionStack includes two core components: + +- [Kusion](https://github.com/KusionStack/kusion): The engine to deliver intentions to Kubernetes and Clouds +- [Catalog](https://github.com/KusionStack/catalog): Catalog of shared Kusion Models and Generators. + +The image above illustrates the workflow of KusionStack and how it works. In the next section, we will describe each of these components in detail. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/concepts/glossary.md b/docs_versioned_docs/version-v0.9/concepts/glossary.md new file mode 100644 index 00000000..2468cf53 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/concepts/glossary.md @@ -0,0 +1,47 @@ +--- +sidebar_position: 2 +--- + +import TOCInline from '@theme/TOCInline'; + +# Glossary + +This page lists and defines technical terms that are widely used across KusionStack. Words such as `Project`, `Stack`, etc. can be overloaded in the technical community, so this page attempts to clarify their meaning in the context of KusionStack. + +<TOCInline + toc={toc.filter((node) => node.level === 2 || node.level === 4)} + minHeadingLevel={2} + // Show h4 headings in addition to the default h2 and h3 headings + maxHeadingLevel={4} +/> + +## Project + +A project in Kusion is any folder which contains a `project.yaml` file and is linked to a Git repository. Usually the mapping between project and repository is 1:1, but you can also have multiple projects connected to a single repository (for example, a monorepo). A project is composed of one or more applications. + +The purpose of the "project" is to bundle application configurations and refer to a Git repository. Specifically, it includes logical configurations for internal pieces to orchestrate the application, and it bundles these configurations in a way that fits different roles, e.g. developer and SRE, to cover the whole life-cycle of application development. + +From the perspective of the application development life cycle, the configuration described by the project is decoupled from the application code, takes the immutable image as input, and lets users conduct the operation and maintenance of the application in an independent configuration code base. + +## Stack + +A stack in Kusion is any folder which contains a `stack.yaml` file under the belonging project directory. Stack provides a mechanism to isolate multiple deploys of the same application; it is the target workspace that an application will be deployed to, and also the smallest operation unit that can be configured and deployed independently. Stacks are commonly used to denote different phases of the software development lifecycle, e.g. development, staging, and production. + +A project can have as many stacks as you need. By default, Kusion creates a default stack for you when you start a new project using the `kusion init` command. + +Stacks let you choose which cluster your applications will be deployed to. + +## Application + +An application in Kusion is declared using the `AppConfiguration` schema and represents a basic unit that is deployed. 
+ +You can create multiple applications within a single project so they can share common configurations. This can be useful if you have several applications that are closely related, such as a backend system for content management and a frontend system for content delivery and display. + +## High Level Schema + +![High_Level_Schema](/img/docs/user_docs/concepts/high-level-schema.png) + + + + diff --git a/docs_versioned_docs/version-v0.9/concepts/index.md b/docs_versioned_docs/version-v0.9/concepts/index.md new file mode 100644 index 00000000..fd42aaee --- /dev/null +++ b/docs_versioned_docs/version-v0.9/concepts/index.md @@ -0,0 +1,7 @@ +# Concepts + +In this section, we will introduce the architecture of KusionStack and some core concepts. + +import DocCardList from '@theme/DocCardList'; + +<DocCardList /> diff --git a/docs_versioned_docs/version-v0.9/concepts/intent.md b/docs_versioned_docs/version-v0.9/concepts/intent.md new file mode 100644 index 00000000..100a5435 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/concepts/intent.md @@ -0,0 +1,17 @@ +# Intent + +The Intent represents the operational intentions that you aim to deliver using Kusion. These intentions are expected to contain all components throughout the software development lifecycle (SDLC), including resources (workload, database, load balancer, etc.), dependencies, and policies. The Kusion module generators are responsible for converting all AppConfigurations and environment configurations into the Intent. Once the Intent is generated, the Kusion Engine takes charge of updating the actual infrastructures to match the Intent. + +## Purpose + +### Single Source of Truth + +In Kusion's workflow, platform engineers build Kusion modules and provide environment configurations, while application developers choose the Kusion modules they need and deploy operational intentions to an environment with the related environment configurations. They can also input dynamic parameters, such as the container image, when executing the `kusion build` command. So the final operational intentions include configurations written by application developers, environment configurations, and dynamic inputs. For this reason, we introduce the **Intent** to represent the SSoT (Single Source of Truth) of Kusion. It is the result of `kusion build`, which contains all operational intentions from different sources. + +### Consistency + +Delivering an application to different environments with identical configurations is a common practice, especially for applications that require scalable distribution. In such cases, an immutable configuration package is helpful. By utilizing the Intent, all configurations and changes are stored in a single file. As the Intent is the input of Kusion, it ensures consistency across different environments whenever you execute Kusion with the same Intent file. + +### Rollback and Disaster Recovery + +The ability to roll back is crucial in reducing incident duration.
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/concepts/kusion.md b/docs_versioned_docs/version-v0.9/concepts/kusion.md new file mode 100644 index 00000000..fa4ec06d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/concepts/kusion.md @@ -0,0 +1,36 @@ +--- +sidebar_position: 3 +--- + +# How Kusion Works + +Kusion is the platform engineering engine of [KusionStack](https://github.com/KusionStack). It delivers intentions described with Kusion Models defined in [Catalog](https://github.com/KusionStack/catalog) to Kubernetes, Clouds and On-Prem infrastructures. + +

+ +

+ + It consists of 3 parts: `Operation Engine`, `Runtimes` and `State`, we will describe each of these components below. + + +## Operation Engine + +Operation Engine is the entry point of the Kusion Engine and is responsible for Kusion basic operations like `preview`, `apply`, `destroy`, etc. The main workflow of this part is to parse resources described in the operation intention (Spec), figure out which resource should be modified according to the specified operation, and execute this operation to the real infra resources. During this workflow, Runtimes and State will be involved. + +## Runtimes + +Runtime is an interface between the actual infrastructure and Kusion. All operations attempting to manipulate an infra resource should be delegated to one Runtime to make this operation affect the actual infrastructure. On the other hand, any infrastructure that implements the Runtime interfaces can be managed by Kusion. + +## State +State is a record of an operation's result. It is a mapping between resources managed by Kusion and the actual infra resources. State is often used as a data source for 3-way merge/diff in operations like `apply` and `preview`. + +State can be stored in many storage mediums like filesystems, OSS, databases, HTTP servers, etc. + +## How Kusion works +Let's get operation `Preview` as an example to demonstrate how the three parts cooperate in an actual operation. + + 1. `Operation Engine` parses resources in operation intentions(Spec) and converts them into a DAG + 2. Walk this DAG: + 1. Get the latest `State` from the actual infra by the `Runtime` + 2. Get the last operation `State` from the `State` storage medium + 3. Merge/Diff three states: desired state described in Spec, live state from `Runtime` and prior state from `State` storage medium, and return the diff result to the console. diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/_category_.json b/docs_versioned_docs/version-v0.9/config-walkthrough/_category_.json new file mode 100644 index 00000000..2f88670d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Configuration Walkthrough", + "position": 4 +} diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/_tagging.md b/docs_versioned_docs/version-v0.9/config-walkthrough/_tagging.md new file mode 100644 index 00000000..fde580da --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/_tagging.md @@ -0,0 +1,5 @@ +--- +sidebar_position: 10 +--- + +# Tag Management \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/base_override.md b/docs_versioned_docs/version-v0.9/config-walkthrough/base_override.md new file mode 100644 index 00000000..643063c2 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/base_override.md @@ -0,0 +1,87 @@ +--- +sidebar_position: 3 +--- + +# Base and Override + +In practice, what we have observed for production-grade applications is that they usually need to be deployed to a wide range of different targets, be it different environments in the SDLC, or different clouds, regions or runtimes for cost/regulation/performance or disaster recovery related reasons. + +In that context, we advocate for a pattern where you can leverage some Kusion and KCL features to minimize the amount of duplicate configurations, by separating the common base application configuration and environment-specific ones. 
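In this pattern, the stack's `kcl.mod` typically lists the shared base file before the stack-specific file in its `entries`, so that the stack file can override the base, as the note below explains. A sketch with illustrative paths:

```
[profile]
entries = ["../base/base.k", "main.k"]
```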
+ +:::info + +The file names in the below examples don't matter as long as they are called out and appear in the correct order in the `entries` field (the field is a list) in `kcl.mod`. The files with common configurations should appear first in the list and stack-specific ones last. The latter one takes precedence. + +The configurations also don't have be placed into a single `.k` file. For complex projects, they can be broken down into smaller organized `.k` files for better readability. +::: + +Base configuration defined in `base/base.k`: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.workload.container as c + +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 80 + } + ] + } +} +``` + +Environment-specific configuration defined in `dev/main.k`: +``` +import catalog.models.schema.v1 as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + # dev stack has different app configuration from the base + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } + } + replicas = 2 + } +} +``` + +Alternatively, you could locate a specific property (in this case below, the `Container` object) in the `AppConfiguration` object using the dot selector shorthand(such as `workload.containers.myapp` or `workload.replicas` below): +``` +import catalog.models.schema.v1 as ac + +# main.k declares customized configurations for dev stack. +myapp: ac.AppConfiguration { + workload.replicas = 2 + workload.containers.myapp: { + # dev stack has different app configuration + image = "gcr.io/google-samples/gb-frontend:v5" + resources = { + "cpu": "250m" + "memory": "256Mi" + } + } +} +``` +This is especially useful when the application configuration is complex but the override is relatively straightforward. + +The two examples above are equivalent when overriding the base. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/database.md b/docs_versioned_docs/version-v0.9/config-walkthrough/database.md new file mode 100644 index 00000000..11f1f864 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/database.md @@ -0,0 +1,277 @@ +--- +sidebar_position: 6 +--- + +# Managed Databases + +The `database` attribute in the `AppConfiguration` instance is used to describe the specification for any databases needed for the application. + +You can currently have only one `database` per `AppConfiguration`. + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](../config-walkthrough/overview). 
+ +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.accessories.database as db +``` + +## Types of Database offerings + +As of version 0.9.0, Kusion supports the following database offerings on the cloud: +- Relational Database Service (RDS) on [AWS](https://aws.amazon.com/rds/) +- Relational Database Service (RDS) on [AliCloud](https://www.alibabacloud.com/product/databases) + +More database types on more cloud vendors will be added in the future. + +Alternatively, Kusion also supports creating a database at `localhost` for local testing needs. A local database is quicker to stand up and easier to manage. It also eliminates the need for an account and any relevant costs with the cloud providers in the case that a local testing environment is sufficient. + +:::info + +You do need a local Kubernetes cluster to run the database workloads. You can refer to [Minikube](https://minikube.sigs.k8s.io/docs/start/) or [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/) to get started. +To see an end-to-end use case for standing up a local testing environment including a local database, please refer to the [Kusion Quickstart](../getting-started/deliver-wordpress). +::: + +## Cloud Credentials and Permissions + +Kusion provisions databases on the cloud via [terraform](https://www.terraform.io/) providers. For it to create _any_ cloud resources, it requires a set of credentials that belongs to an account that has the appropriate write access, as well as a provider region so the terraform provider can be initialized properly. + +For AWS, the environment variables needed: +``` +export AWS_ACCESS_KEY_ID="xxxxxxxxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="xxxxxxx" # replace it with your SecretKey +export AWS_PROVIDER_REGION="xx-xxxx-x" # replace it with your AWS Region +``` + +For AliCloud, the environment variables needed: +``` +export ALICLOUD_ACCESS_KEY="xxxxxxxxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="xxxxxxxxx" # replace it with your SecretKey +export ALICLOUD_PROVIDER_REGION="xx-xxxxxxx" # replace it with your AliCloud Region +``` + +The user account that owns these credentials would need to have the proper permission policies attached to create databases and security groups. If you are using the cloud-managed policies, the policies needed to provision a database and configure firewall rules are listed below. + +For AWS: +- `AmazonVPCFullAccess` for creating and managing database firewall rules via security group +- `AmazonRDSFullAccess` for creating and managing RDS instances + +For AliCloud: +- `AliyunVPCFullAccess` for creating and managing database firewall rules via security group +- `AliyunRDSFullAccess` for creating and managing RDS instances + +Alternatively, you can use customer managed policies if the cloud provider built-in policies don't meet your needs. The list of permissions needed are in the [AmazonRDSFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonRDSFullAccess.html#AmazonRDSFullAccess-json) and [AmazonVPCFullAccess Policy Document](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonVPCFullAccess.html). It will most likely be a subset of the permissions in the policy documents. 
+ +## Configure Database + +### Provision a Cloud Database + +Assuming the steps in the [Cloud Credentials and Permissions](#cloud-credentials-and-permissions) section have been completed properly, you can now provision cloud databases via Kusion. + +#### AWS RDS Instance +To provision an AWS RDS instance with MySQL v5.7: +``` +wordpress: ac.AppConfiguration { + # ... + database: db.Database { + type: "aws" + engine: "MySQL" + version: "5.7" + size: 20 + instanceType: "db.t3.micro" + securityIPs = ["0.0.0.0/0"] + } +} +``` + +It's highly recommended to replace `0.0.0.0/0` and closely manage the whitelist of IPs that can access the database for security purposes. Keeping the `0.0.0.0/0` value in the example above, or omitting `securityIPs` altogether, allows connections from anywhere, which is typically a bad security practice. + +The supported `engine` values are `MySQL`, `MariaDB`, `Postgres` and `SQLServer-SE`. + +The supported engine versions can be found in: +- [MySQL versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/MySQL.Concepts.VersionMgmt.html) +- [MariaDB versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/MariaDB.Concepts.VersionMgmt.html#MariaDB.Concepts.VersionMgmt.Supported) +- [PostgreSQL versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts.General.DBVersions) +- [Microsoft SQL Server versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) + +The `instanceType` field determines the computation and memory capacity of the RDS instance. The `db.t3.micro` instance type in the example above represents the `db.t3` instance class with a size of `micro`. In the same `db.t3` instance family there are also `db.t3.small`, `db.t3.medium`, `db.t3.2xlarge`, etc. + +The full list of supported `instanceType` values can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Support). + +You can also adjust the storage capacity for the database instance by changing the `size` field, which is the storage size measured in gigabytes. The minimum is 20. More details can be found [here](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#Concepts.Storage.GeneralSSD). + +#### AliCloud RDS Instance + +To provision an AliCloud RDS instance with MySQL v8.0, note that AliCloud RDS has several additional fields such as `category`, `subnetID` and `privateRouting`: +``` +wordpress: ac.AppConfiguration { + # ... + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "8.0" + size: 20 + instanceType: "mysql.n2.serverless.1cc" + category = "serverless_basic" + subnetID = "{your-alicloud-vswitch-id}" + securityIPs = ["0.0.0.0/0"] + privateRouting = False + } +} +``` + +We will walk through `subnetID` and `privateRouting` in the [Configure Network Access](#configure-network-access) section. + +The supported `engine` values are `MySQL`, `MariaDB`, `PostgreSQL` and `SQLServer`.
+ +The supported engine versions can be found in: +- [MySQL versions](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/major-version-lifecycle-description) +- [MariaDB versions](https://www.alibabacloud.com/help/en/rds/developer-reference/api-rds-2014-08-15-createdbinstance) +- [PostgreSQL versions](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-postgresql/lifecycles-of-major-engine-versions) +- [Microsoft SQL Server versions](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-sql-server/release-notes-for-minor-engine-versions-of-apsaradb-rds-for-sql-server) + +A summarized version can be found [here](https://www.alibabacloud.com/help/en/rds/developer-reference/api-rds-2014-08-15-createdbinstance) in the `EngineVersion` parameter. + +The full list of supported `instanceType` values can be found in: +- [MySQL instance types(x86)](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/primary-apsaradb-rds-for-mysql-instance-types#concept-2096487) +- [MariaDB instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mariadb/instance-types#concept-2096591) +- [PostgreSQL instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-postgresql/primary-apsaradb-rds-for-postgresql-instance-types#concept-2096578) +- [Microsoft SQL Server instance types](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-sql-server/primary-apsaradb-rds-for-sql-server-instance-types#concept-2096545) + +### Local Database + +To deploy a local database with MySQL v8.0: +``` +wordpress: ac.AppConfiguration { + # ... + database: db.Database { + type: "local" + engine: "MySQL" + version: "8.0" + instanceType: "local" + } +} +``` + +The supported `engine` values are `MySQL` and `MariaDB` as of version 0.9.0. Kusion will stand up a `mysql` deployment and expose it as a service in the local Kubernetes cluster for local workloads to connect to. + +## Database Credentials + +There is no need to manage the database credentials manually. Kusion will automatically generate a random password, set it as the credential when creating the database, and then inject the hostname, username and password into the application runtime. + +You have the option to BYO (Bring Your Own) username for the database credential by specifying the `username` attribute in the `database`: +``` +wordpress: ac.AppConfiguration { + # ... + database: db.Database { + # ... + username: "my_username" + } +} +``` + +You cannot bring your own password. The password will always be managed by Kusion automatically. + +The database credentials are injected into the environment variables of the application container. You can access them via the following env vars: +``` +# env | grep KUSION_DB +KUSION_DB_HOST=wordpress.xxxxxxxx.us-east-1.rds.amazonaws.com +KUSION_DB_USERNAME=xxxxxxxxx +KUSION_DB_PASSWORD=xxxxxxxxx +``` + +You can use these environment variables out of the box. Or most likely, your application might retrieve the connection details from a different set of environment variables. In that case, you can map the kusion environment variables to the ones expected by your application using the `$()` expression. 
+ +The example below will assign the value of `KUSION_DB_HOST` into `WORDPRESS_DB_HOST`, `KUSION_DB_USERNAME` into `WORDPRESS_DB_USER`, and likewise `KUSION_DB_PASSWORD` into `WORDPRESS_DB_PASSWORD`: +``` +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3-apache" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD)" + } + # ... + } + } + # ... + } + database: db.Database { + # ... + } +} +``` + +## Configure Network Access + +You can also optionally configure the network access to the database as part of the `AppConfiguration`. This is highly recommended because it dramatically improves the security posture of your cloud environment by following the principle of least privilege. + +The `securityIPs` field in the `Database` schema declares the list of network addresses that are allowed to access the database. The network addresses are in the [CIDR notation](https://aws.amazon.com/what-is/cidr/) and can be either a private IP range ([RFC-1918](https://datatracker.ietf.org/doc/html/rfc1918) and [RFC-6598](https://datatracker.ietf.org/doc/html/rfc6598) address) or a public one. + +If the database needs to be accessed from a public location (which should most likely not be the case in a production environment), `securityIPs` needs to include the public IP address of the traffic source (for instance, if the RDS database needs to be accessed from your computer). + +To configure AWS RDS to only allow network access from a VPC with a CIDR of `10.0.1.0/24` and from a public IP of `103.192.227.125`: +``` +wordpress: ac.AppConfiguration { + # ... + database: db.Database { + type: "aws" + ... + securityIPs = ["10.0.1.0/24", "103.192.227.125/32"] + } +} +``` + +Depending on the cloud provider, the default behavior of the database firewall settings may differ if `securityIPs` is omitted. + +### Subnet ID + +On AWS, you have the option to launch the RDS instance inside a specific VPC if a `subnetID` is present in the application configuration. By default, if `subnetID` is not provided, the RDS instance will be created in the default VPC for that account. However, the recommendation is to self-manage your VPCs to provide better isolation from a network security perspective. + +On AliCloud, the `subnetID` is required. The concept of subnet maps to VSwitch in AliCloud. + +To place the RDS instance into a specific VPC on AWS: +``` +wordpress: ac.AppConfiguration { + # ... + database: db.Database { + type: "aws" + ... + subnetID: "subnet-xxxxxxxxxxxxxxxx" # replace it with your vpc subnet ID + } +} +``` + +### Private Routing + +There is an option to enforce private routing on certain cloud providers if both the workload and the database are running on the cloud. + +On AliCloud, you can set the `privateRouting` flag to `True`. The generated database host will be a private FQDN that is only resolvable and accessible from within the AliCloud VPCs. Setting the `privateRouting` flag to `True` when `type` is `aws` is a no-op. + +To enforce private routing on AliCloud: +``` +wordpress: ac.AppConfiguration { + # ... + database: db.Database { + type: "alicloud" + ... + privateRouting: True + } +} +``` + +Kusion will then generate a private FQDN and inject it into the application runtime as the environment variable `KUSION_DB_HOST` for the application to use.
A complete list of Kusion-managed environment variable can be found [here](../reference/model/naming-conventions#list-of-magic-variables). + +Otherwise when using the public FQDN to connect to a database from the workload, the route will depend on cloud provider's routing preference. The options are generally either: +- Travel as far as possible on the cloud provider's global backbone network, or also referred to as cold potato routing, or +- Egress as early as possible to the public Internet and re-enter the cloud provider's datacenter later, or also referred to as hot potato routing + +The prior generally has better performance but is also more expensive. + +You can find a good read on the [AWS Blog](https://aws.amazon.com/blogs/architecture/internet-routing-and-traffic-engineering/) or the [Microsoft Learn](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/routing-preference-overview). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/kcl_basics.md b/docs_versioned_docs/version-v0.9/config-walkthrough/kcl_basics.md new file mode 100644 index 00000000..b3db5bc6 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/kcl_basics.md @@ -0,0 +1,144 @@ +--- +sidebar_position: 2 +--- + +# KCL Basics + +## Table of Content +- [Variable assignments](#variable-assignments) +- [Common built-in types](#common-built-in-types) +- [Lists and maps](#lists-and-maps) +- [Conditional statements](#conditional-statements) +- [The : and = operator](#the--and--operator) +- [Advanced KCL capabilities](#advanced-kcl-capabilities) + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. KCL is an open source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +## Variable assignments + +There are two ways to initialize a variable in KCL. You can either use the `:` operator or the `=` operator. We will discuss the difference between them in [this section later](#the--and--operator). + +Here are the two ways to create a variable and initialize it: +``` +foo = "Foo" # Declare a variable named `foo` and its value is a string literal "Foo" +bar: "Bar" # Declare a variable named `bar` and its value is a string literal "Bar" +``` + +You will be able to override a variable assignment via the `=` operator. We will discuss this in depth in the [`:` and `=` operator section](#the--and--operator). + +## Common built-in types + +KCL supports `int`, `float`, `bool` and `string` as the built-in types. + +Other types are defined in the packages that are imported into the application configuration files. One such example would be the `AppConfiguration` object (or `Container`, `Probe`, `Port` object, etc) that are defined in the `catalog` repository. + +## Lists and maps + +Lists are represented using the `[]` notation. +An example of lists: +``` +list0 = [1, 2, 3] +list1 = [4, 5, 6] +joined_list = list0 + list1 # [1, 2, 3, 4, 5, 6] +``` + +Maps are represented using the `{}` notation. +An example of maps: +``` +a = {"one" = 1, "two" = 2, "three" = 3} +b = {'one' = 1, 'two' = 2, 'three' = 3} +assert a == b # True +assert len(a) == 3 # True +``` + +## Conditional statements +You can also use basic control flow statements when writing the configuration file. 
+ +An example that sets the value of `replicas` conditionally based on the value of `containers.myapp.resources.cpu`: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c + +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 if containers.myapp.resources.cpu == "500m" else 2 + } +} +``` + +For more details on KCL's control flow statements, please refer to the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#control-flow-statements). + +## The `:` and `=` operator + +You might have noticed the mixed usage of `:` and `=` in the samples above. + +:::info + +**TLDR: The recommendation is to use `:` in the common configurations, and `=` for override in the environment-specific configurations.** +::: + +In KCL: +- `:` represents a union-ed value assignment. In the pattern `identifier: E` or `identifier: T E`, the value of the expression `E` with optional type annotation `T` will be merged and union-ed into the element value. +- `=` represents a value override. In the pattern `identifier = E` or `identifier = T E`, the value of the expression `E` with optional type annotation `T` will override the `identifier` attribute value. + +Let's take a look at an example: +``` +# This is one configuration that will be merged. +config: Config { + data.d1 = 1 +} +# This is another configuration that will be merged. +config: Config { + data.d2 = 2 +} +``` + +The above is equivalent to the snippet below since the two expressions for `config` get merged/union-ed into one: +``` +config: Config { + data.d1 = 1 + data.d2 = 2 +} +``` + +whereas using the `=` operator will result in a different outcome: +``` +# This is the first configuration. +config = Config { + data.d1 = 1 +} +# This is the second configuration, which will override the prior one. +config = Config { + data.d2 = 2 +} +``` + +The config above results in: +``` +config: Config { + data.d2 = 2 +} +``` + +Please note that the `:` attribute operator represents an idempotent merge operation, and an error will be thrown when the values that need to be merged conflict with each other. + +``` +data0 = {id: 1} | {id: 2} # Error: conflicting values between {'id': 2} and {'id': 1} +data1 = {id: 1} | {id = 2} # Ok, the value of `data1` is {"id": 2} +``` + +More about the `:` and `=` operators can be found in the [KCL documentation](https://kcl-lang.io/docs/reference/lang/tour#config-operations). + +## Advanced KCL capabilities + +For more advanced KCL capabilities, please visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/monitoring.md b/docs_versioned_docs/version-v0.9/config-walkthrough/monitoring.md new file mode 100644 index 00000000..8e364e77 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/monitoring.md @@ -0,0 +1,118 @@ +--- +sidebar_position: 8 +--- + +# Application Monitoring + +The `monitoring` attribute in the `AppConfiguration` instance is used to describe the specification for the collection of monitoring requirements for the application. + +As of version 0.9.0, Kusion supports integration with Prometheus by managing scraping behaviors in the configuration file.
+ +:::info + +The `monitoring` attribute requires the target cluster to have installed Prometheus correctly, either as a Kubernetes operator or a server/agent. + +More about how to set up Prometheus can be found in the [Prometheus User Guide for Kusion](../guides/observability/prometheus) +::: + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](../config-walkthrough/overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.monitoring as m +``` + +## Project-level configurations + +In addition to the KCL configuration file, there are also project-level configurations that can be set in the `project.yaml` in hte project root directory. + +By separating configurations that the developers are interested in and those that platform owners are interested in, we can reduce the cognitive complexity of the application configuration and achieve separation of concern. + +In the context of `monitoring`, there are two flags you can set in `project.yaml` that will alter the behavior of Kusion. + +:::info + +If you have initialized the projects with `kusion init`, the `project.yaml` should be automatically created for you. +::: + +### Operator mode + +The `operatorMode` flag indicates to Kusion whether the Prometheus instance installed in the cluster runs as a Kubernetes operator or not. This determines the different kinds of resources Kusion manages. + +To see more about different ways to run Prometheus in the Kubernetes cluster, please refer to the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus.md#prometheus-installation). + +Most cloud vendors provide an out-of-the-box monitoring solutions for workloads running in a managed-Kubernetes cluster (EKS, AKS, etc), such as AWS CloudWatch, Azure Monitor, etc. These solutions mostly involve installing an agent (CloudWatch Agent, OMS Agent, etc) in the cluster and collecting the metrics to a centralized monitoring server. In those cases, you don't need to set `operatorMode` to `True`. It only needs to be set to `True` when you have an installation of the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) running inside the Kubernetes cluster. + +:::info + +For differences between [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator), [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) and the [community kube-prometheus-stack helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), the details are documented [here](https://github.com/prometheus-operator/prometheus-operator#prometheus-operator-vs-kube-prometheus-vs-community-helm-chart). +::: + +### Monitor types + +The `monitorType` flag indicates the kind of monitor Kusion will create. It only applies when `operatorMode` is set to `True`. As of version 0.9.0, Kusion provides options to scrape metrics from either the application pods or its corresponding Kubernetes services. This determines the different kinds of resources Kusion manages when Prometheus runs as an operator in the target cluster. 
+ +A sample `project.yaml` with Prometheus settings: +``` +# The project basic info +name: multi-stack-project +generator: + type: AppConfiguration +prometheus: + operatorMode: True + monitorType: Service +``` + +To instruct Prometheus to scrape from pod targets instead: +``` +# The project basic info +name: multi-stack-project +generator: + type: AppConfiguration +prometheus: + operatorMode: True + monitorType: Pod +``` + +If the `prometheus` section is missing from the `project.yaml`, Kusion defaults `operatorMode` to false. + +## Managing Scraping Configuration +To create scrape configuration for the application: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + monitoring: m.Prometheus{ + interval: "30s" + timeout: "15s" + path: "/metrics" + port: "web" + scheme: "http" + } +} +``` + +The example above will instruct the Prometheus job to scrape metrics from the `/metrics` endpoint of the application every 30 seconds. + +To instruct Prometheus to scrape from `actuator/metrics` on port `9099` instead: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + monitoring: m.Prometheus{ + interval: "10s" + timeout: "5s" + path: "/actuator/metrics" + port: "9099" + scheme: "http" + } +} +``` + +More details about how the Prometheus integration works can be found in the [design documentation](https://github.com/KusionStack/kusion/blob/main/docs/prometheus). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/networking.md b/docs_versioned_docs/version-v0.9/config-walkthrough/networking.md new file mode 100644 index 00000000..e4f4ee9d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/networking.md @@ -0,0 +1,116 @@ +--- +sidebar_position: 5 +--- + +# Application Networking + +In addition to configuring application's [container specifications](workload#configure-containers), you can also configure its networking behaviors, including how to expose the application and how it can be accessed. + +In future versions, this will also include ingress-based routing strategy and DNS configurations. + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n +``` + +## Private vs Public Access + +Private network access means the service can only be access from within the target cluster. + +Public access is implemented using public load balancers on the cloud as of v0.9.0. This generally requires a Kubernetes cluster that is running on the cloud with a vendor-specific service controller. + +Any ports defined default to private access unless explicitly specified. + +To expose port 80 to be accessed privately: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + port: 80 + } + ] + } +} +``` + +To expose port 80 to be accessed publicly on AWS using an AWS Load Balancer: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + type: "aws" + port: 80 + public: True + } + ] + } +} +``` + +## Mapping ports + +To expose a port `80` that maps to a different port `8088` on the container: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... 
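        # `port` is the port exposed by the service; `targetPort` below is the port the container actually listens on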
+ ports: [ + n.Port { + port: 80 + targetPort: 8088 + } + ] + } +} +``` + +## Exposing multiple ports + +You can also expose multiple ports and configure them separately. + +To expose port 80 to be accessed publicly on an AliCloud load balancer, and port 9099 for private access (to be scraped by Prometheus, for example): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + type: "aliyun" + port: 80 + public: True + } + n.Port { + port: 9099 + } + ] + } +} +``` + +## Choosing protocol + +To expose a port using the `UDP` protocol: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + ports: [ + n.Port { + port: 80 + targetPort: 8088 + protocol: "UDP" + } + ] + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/operational_rules.md b/docs_versioned_docs/version-v0.9/config-walkthrough/operational_rules.md new file mode 100644 index 00000000..6318913d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/operational_rules.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 9 +--- + +# Operational Rules + +The `opsRule` attribute in the `AppConfiguration` instance is used to describe the specification for the collection of operational rule requirements for the application. Operational rules are used as a preemptive measure to police and stop any unwanted changes. + +:::info + +The `opsRules` attribute requires the target cluster to have installed the [KusionStack-operating](https://github.com/KusionStack/operating) controllers properly. +::: + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](../config-walkthrough/overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import models.schema.v1.trait as t +``` + +## Max Unavailable Replicas + +Currently, `OpsRule` supports setting a `maxUnavailable` parameter, which specifies the maximum number of pods that can be rendered unavailable at any time. It can be either a fraction of the total pods for the current application or a fixed number. This operational rule is particularly helpful against unexpected changes or deletes to the workloads. It can also prevents too many workloads from going down during an application upgrade. + +More rules will be available in future versions of Kusion. + +To set `maxUnavailable` to a percentage of pods: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + # ... + } + } + opsRule: t.OpsRule { + maxUnavailable: "30%" + } +} +``` + +To set `maxUnavailable` to a fixed number of pods: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + } + opsRule: t.OpsRule { + maxUnavailable: 2 + } +} +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/overview.md b/docs_versioned_docs/version-v0.9/config-walkthrough/overview.md new file mode 100644 index 00000000..1f62de1d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/overview.md @@ -0,0 +1,209 @@ +--- +sidebar_position: 1 +--- + +# Configuration File Overview + +Kusion consumes one or more declarative configuration files (written in KCL) that describe the application, and delivers intent to the target runtime including Kubernetes, clouds, or on-prem infrastructure. 
+ +This documentation series walks you through the odds and ends of managing such configuration files. + +## Table of Content +- [Directory Structure](#directory-structure) +- [AppConfiguration Model](#appconfiguration-model) +- [Authoring Configuration Files](#authoring-configuration-files) + - [Identifying KCL file](#identifying-kcl-file) + - [KCL Packages and imports](#kcl-packages-and-import) + - [Understanding kcl.mod](#understanding-kclmod) + - [Building blocks](#building-blocks) + - [Instantiate an application](#instantiating-an-application) + - [Using kusion init](#using-kusion-init) + - [Using references](#using-references) + +## Directory Structure + +Kusion expects the configuration file to be placed in a certain directory structure because it might need some metadata (that is not stored in the application configuration itself) in order to proceed. + +:::info + +See [Glossary](../concepts/glossary) for more details about Project and Stack. +::: + +A sample multi-stack directory structure looks like the following: +``` +~/playground$ tree multi-stack-project/ +multi-stack-project/ +├── README.md +├── base +│   └── base.k +├── dev +│   ├── kcl.mod +│   ├── kcl.mod.lock +│   ├── main.k +│   └── stack.yaml +├── prod +│   ├── kcl.mod +│   ├── kcl.mod.lock +│   ├── main.k +│   └── stack.yaml +└── project.yaml +``` + +In general, the directory structure follows a hierarchy where the top-level is the project configurations, and the sub-directories represent stack-level configurations. + +You may notice there is a `base` directory besides all the stacks. The `base` directory is not mandatory, but rather a place to store common configurations between different stacks. A common pattern we observed is to use stacks to represent different stages (dev, stage, prod, etc.) in the software development lifecycle, and/or different deployment targets (azure-eastus, aws-us-east-1, etc). A project can have as many stacks as needed. + +In practice, the applications deployed into dev and prod might very likely end up with a similar set of configurations except a few fields such as the application image (dev might be on newer versions), resource requirements (prod might require more resources), etc. + +As a general best practice, we recommend managing the common configurations in `base.k` as much as possible to minimize duplicate code. We will cover how override works in [Base and Override](base_override). + +## AppConfiguration Model + +`AppConfiguration` is the out-of-the-box model we build that describes an application. It serves as the declarative intent for a given application. + +The schema for `AppConfiguration` is defined in the [KusionStack/catalog](https://github.com/KusionStack/catalog) repository. It is designed as a unified, application-centric model that encapsulates the comprehensive configuration details and in the meantime, hides the complexity of the infrastructure as much as possible. + +`AppConfiguration` consists of multiple sub-components that each represent either the application workload itself, its dependencies, relevant workflows or operational expectations. We will deep dive into the details on how to author each of these elements in this upcoming documentation series. + +For more details on the `AppConfiguration`, please refer to the [design documentation - WIP](https://github.com/KusionStack/kusion/pull/420/files). + +## Authoring Configuration Files + +[KCL](https://kcl-lang.io/) is the choice of configuration language consumed by Kusion. 
KCL is an open source constraint-based record and functional language. KCL works well with a large number of complex configurations via modern programming language technology and practice, and is committed to provide better modularity, scalability, stability and extensibility. + +### Identifying KCL file + +KCL files are identified with `.k` suffix in the filename. + +### KCL Packages and Import + +Similar to most modern General Programming Languages (GPLs), KCL packages are used to organize collections of related KCL source files into modular and re-usable units. + +In the context of Kusion, we use KCL packages to define models that could best abstract the behavior of an application. Specifically, we provide an official out-of-the-box KCL package(will keep iterating) with the name [catalog](https://github.com/KusionStack/catalog). When authoring an application configuration file, you can simply import the [catalog](https://github.com/KusionStack/catalog) package in the source code and use all the schemas (including AppConfiguration) defined in the `catalog` package. + +Similarly, if the schemas in the [catalog](https://github.com/KusionStack/catalog) package does not meet your needs, you can always fork it and make modifications, then import the modified package; or create a brand new package altogether and import it. + +The Kusion ecosystem can be easily expanded in this manner. + +An example of the import looks like the following: +``` +### import from the official catalog package +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c + +### import my own modified package +import my_own_catalog.models.schema.v1 as moc +import my_other_package.schema.v1.redis as myredis +``` + +Take `import catalog.models.schema.v1.workload as wl` as an example, the `.models.schema.v1.workload` part after `import catalog` represents the relative path of a specific schema to import. In this case, the `workload` schemas is defined under `models/schema/v1/workload` directory in the `catalog` package. + +### Understanding kcl.mod + +Much similar to the concept of `go.mod`, Kusion uses `kcl.mod` as the source of truth to manage metadata (such as package name, dependencies, etc.) for the current package. Kusion will also auto-generate a `kcl.mod.lock` as the dependency lock file. + +The most common usage for `kcl.mod` is to manage the dependency of your application configurations. + +:::info + +Please note this `kcl.mod` will be automatically generated if you are using `kusion init` to initialize a project with a template. You will only need to modify this file if you are modifying the project metadata outside the initialization process, such as upgrading the dependency version or adding a new dependency altogether, etc. +:::info + +There are 3 sections in a `kcl.mod` file: +- `package`, representing the metadata for the current package. +- `dependencies`, describing the packages the current package depend on. Supports referencing either a git repository or an OCI artifact. +- `profile`, defining the behavior for Kusion. In the example below, it describes the list of files Kusion should look for when parsing the application configuration. 
+ +An example of `kcl.mod`: +``` +[package] +name = "multi-stack-project" +edition = "0.5.0" +version = "0.1.0" + +[dependencies] +catalog = { git = "https://github.com/KusionStack/catalog.git", tag = "0.1.0" } +# Uncomment the line below to use your own modified package +# my-package = ghcr.io/kcl-lang/my-package + +[profile] +entries = ["../base/base.k", "main.k"] +``` + +### Building Blocks + +Configuration files consist of building blocks that are made of instances of schemas. An `AppConfiguration` instance consists of several child schemas, most of which are optional. The only mandatory one is the `workload` instance. We will take a closer look in the [workload walkthrough](workload). The order of the building blocks does NOT matter. + +The major building blocks as of version `0.9.0`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container {} + ... + } + ports: [] + secrets: {} + } + database: d.Database{} + monitoring: m.Prometheus{} + opsRule: t.OpsRule {} + ... +} +``` + +We will deep dive into each one of the building blocks in this documentation series. + +### Instantiating an application + +In Kusion's out-of-the-box experience, an application is identified with an instance of `AppConfiguration`. You may have more than one application in the same project or stack. + +Here's an example of a configuration that can be consumed by Kusion (assuming it is placed inside the proper directory structure that includes project and stack configurations, with a `kcl.mod` present): + +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.workload.container as c + +gocity: ac.AppConfiguration { + workload: wl.Service { + containers: { + "gocity": c.Container { + image = "howieyuen/gocity:latest" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 4000 + } + ] + } +} +``` + +Don't worry about what `workload` or `ports` stand for at the moment. We will deep dive into each one of them in this upcoming documentation series. + +### Using `kusion init` + +Kusion offers a `kusion init` sub-command which initializes a new project using some pre-built templates, which saves you from the hassle to manually build the aforementioned directory structure that Kusion expects. + +There is a built-in template `single-stack-sample` in the kusion binary that can be used offline. + +We also maintain a [kusion-templates repository](https://github.com/KusionStack/kusion-templates) that hosts a list of more comprehensive project scaffolds. You can access them via `kusion init --online` command which requires connectivity to `github.com`. + +The pre-built templates are meant to help you get off the ground quickly with some simple out-of-the-box examples. You can refer to the [QuickStart documentation](../getting-started/deliver-wordpress) for some step-by-step tutorials. + +### Using references + +The reference documentation for the `catalog` package is located in [Reference](../reference/model/catalog_models/doc_app_configuration). + +If you are using the `catalog` package out of the box, the reference documentation provides a comprehensive view for each schema involved, including all the attribute names and description, their types, default value if any, and whether a particular attribute is required or not. There will also be an example attached to each schema reference. 
+ +We will also deep dive into some common examples in the upcoming sections. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/secret.md b/docs_versioned_docs/version-v0.9/config-walkthrough/secret.md new file mode 100644 index 00000000..fdd11305 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/secret.md @@ -0,0 +1,95 @@ +--- +sidebar_position: 7 +--- + +# Secret Management + +You can manage application secrets via the `secrets` attribute in the `workload` schema. Depending on the context of your application, this might include pieces of credentials required to access a third-party application. + +If your application depends on any cloud resources that are managed by Kusion, their credentials are automatically managed by Kusion (generated and injected into application runtime environment variable). You shouldn't have to manually create those. + +:::info +If your application workloads are also running on the cloud, it's recommended to leverage identity-based keyless authentication as much as possible to minimize the nuisance of secret management. Application identities will be supported in a future version of Kusion. + +::: + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.secret as sec +``` + +## Creating a secret + +As of version 0.9.0, Kusion supports creating secrets by turning the `secrets` declared in the configuration files into Kubernetes secrets. + +:::info + +As a general principle, storing secrets in a plain text configuration file is highly discouraged. The recommended approach is to store the secrets in a third-party vault (such as Hashicorp Vault, AWS Secrets Manager and KMS, Azure Key Vault, etc) and retrieve the secret in the runtime only. +::: + +Create a secret with the type `Opaque`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "my-secret": sec.Secret { + type: "opaque" + data: { + "hello": "world" + "foo": "bar" + } + } + } + } +} +``` + +Create a secret with the type `kubernetes.io/basic-auth`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "my-secret": sec.Secret { + type: "basic" + data: { + "username": "admin" + "password": "******" + } + } + } + } +} +``` + +When creating a `kubernetes.io/basic-auth` type secret, the `data` field must have at least one of `username` or `password`. + +For more details about the secret types, please see the [Kubernetes secret documentation](https://kubernetes.io/docs/concepts/configuration/secret/). + +## Immutable secrets + +You can also declare a secret as immutable to prevent it from being changed accidentally. + +To declare a secret as immutable: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + # ... + secrets: { + "my-secret": sec.Secret { + # ... + immutable: True + } + } + } +} +``` + +You can change a secret from mutable to immutable but not the other way around. That is because the Kubelet will stop watching secrets that are immutable. As the name suggests, you can only delete and re-create immutable secrets but you cannot change them. 
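Putting these options together, a sketch of an immutable `basic` type secret might look like the following (credential values are placeholders):

```
myapp: ac.AppConfiguration {
    workload: wl.Service {
        # ...
        secrets: {
            "my-secret": sec.Secret {
                type: "basic"
                data: {
                    "username": "admin"
                    "password": "******"
                }
                # Immutable: the secret must be deleted and re-created to change it
                immutable: True
            }
        }
    }
}
```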
\ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/config-walkthrough/workload.md b/docs_versioned_docs/version-v0.9/config-walkthrough/workload.md new file mode 100644 index 00000000..7df8f430 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/config-walkthrough/workload.md @@ -0,0 +1,337 @@ +--- +sidebar_position: 4 +--- + +# Workload + +The `workload` attribute in the `AppConfiguration` instance is used to describe the specification for the application workload. The application workload generally represents the computing component for the application. It is the only required field when instantiating an `AppConfiguration`. + +A `workload` maps to an `AppConfiguration` instance 1:1. If there are more than one workload, they should be considered different applications. + +## Table of Content +- [Import](#import) +- [Types of workloads](#types-of-workloads) +- [Configure containers](#configure-containers) + - [Application image](#application-image) + - [Resource Requirements](#resource-requirements) + - [Health Probes](#health-probes) + - [Lifecycle Hooks](#lifecycle-hooks) + - [Create Files](#create-files) + - [Customize container initialization](#customize-container-initialization) +- [Configure Replicas](#configure-replicas) +- [Differences between Services and Jobs](#differences-between-services-and-jobs) +- [Workload References](#workload-references) + +## Import + +In the examples below, we are using schemas defined in the `catalog` package. For more details on KCL package import, please refer to the [Configuration File Overview](overview). + +The `import` statements needed for the following walkthrough: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc +``` + +## Types of Workloads + +There are currently two types of workloads defined in the `AppConfiguration` model: +- `Service`, representing a long-running, scalable workload type that should "never" go down and respond to short-lived latency-sensitive requests. This workload type is commonly used for web applications and services that expose APIs. +- `Job`, representing batch tasks that take from a few seconds to days to complete and then stop. These are commonly used for batch processing that is less sensitive to short-term performance fluctuations. + +To instantiate a `Service`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service {} +} +``` + +To instantiate a `Job`: +``` +myapp: ac.AppConfiguration { + workload: wl.Job {} +} +``` + +Of course, the `AppConfiguration` instances above is not sufficient to describe an application. We still need to provide more details in the `workload` section. + +## Configure containers + +Kusion is built on top of cloud-native philosophies. One of which is that applications should run as loosely coupled microservices on abstract and self-contained software units, such as containers. + +The `containers` attribute in a workload instance is used to define the behavior for the containers that run application workload. The `containers` attribute is a map, from the name of the container to the `catalog.models.schema.v1.workload.container.Container` Object which includes the container configurations. + +:::info + +The name of the container is in the context of the configuration file, so you could refer to it later. 
It's not referring to the name of the container in the Kubernetes cluster (or any other runtime). +::: + +Everything defined in the `containers` attribute is considered an application container, as opposed to a sidecar container. Sidecar containers will be introduced in a different attribute in a future version. + +In most of the cases, only one application container is needed. Ideally, we recommend mapping an `AppConfiguration` instance to a microservice in the microservice terminology. + +We will walkthrough the details of configuring a container using an example of the `Service` type. + +To add an application container: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container {} + } + } +} +``` + +### Application image + +The `image` attribute in the `Container` schema specifies the application image to run. This is the only required field in the `Container` schema. + +To specify an application image: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + } + } +} +``` + +### Resource Requirements + +The `resources` attribute in the `Container` schema specifies the application resource requirements such as cpu and memory. + +You can specify an upper limit (which maps to resource limits only) or a range as the resource requirements (which maps to resource requests and limits in Kubernetes). + +To specify an upper bound (only resource limits): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + # ... + } + } + } +} +``` + +To specify a range (both resource requests and limits): +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # Sets requests to cpu=250m and memory=256Mi + # Sets limits to cpu=500m and memory=512Mi + resources: { + "cpu": "250m-500m" + "memory": "256Mi-512Mi" + } + # ... + } + } + } +} +``` + +### Health Probes + +There are three types of `Probe` defined in a `Container`: +- `livenessProbe` - used to determine if the container is healthy and running +- `readinessProbe` - used to determine if the container is ready to accept traffic +- `startupProbe` - used to determine if the container has started properly. Liveness and readiness probes don't start until `startupProbe` succeeds. Commonly used for containers that takes a while to start + +The probes are optional. You can only have one Probe of each kind for a given `Container`. + +To configure a `Http` type `readinessProbe` that probes the health via HTTP request and a `Exec` type `livenessProbe` which executes a command: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... 
+ # Configure an Http type readiness probe at /healthz + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "/healthz" + } + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 15 + successThreshold: 3 + failureThreshold: 1 + } + # Configure an Exec type liveness probe that executes probe.sh + livenessProbe: p.Probe { + probeHandler: p.Exec { + command: ["probe.sh"] + } + initialDelaySeconds: 10 + } + } + } + } +} +``` + +### Lifecycle Hooks + +You can also configure lifecycle hooks that triggers in response to container lifecycle events such as liveness/startup probe failure, preemption, resource contention, etc. + +There are two types that is currently supported: +- `PreStop` - triggers before the container is terminated. +- `PostStart` - triggers after the container is initialized. + +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # Configure lifecycle hooks + lifecycle: lc.Lifecycle { + # Configures an Exec type pre-stop hook that executes preStop.sh + preStop: p.Exec { + command: ["preStop.sh"] + } + # Configures an Http type pre-stop hook at /post-start + postStart: p.Http { + url: "/post-start" + } + } + } + } + } +} +``` + +### Create Files + +You can also create files on-demand during the container initialization. + +To create a custom file and mount it to `/home/admin/my-file` when the container starts: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + } + # ... + # Creates a file during container startup + files: { + "/home/admin/my-file": c.FileSpec { + content: "some file contents" + mode: "0777" + } + } + } + } +} +``` + +### Customize container initialization + +You can also customize the container entrypoint via `command`, `args`, and `workingDir`. These should **most likely not be required**. In most of the cases, the entrypoint details should be baked into the application image itself. + +To customize the container entrypoint: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + "myapp": c.Container { + image: "gcr.io/google-samples/gb-frontend:v5" + # ... + # This command will overwrite the entrypoint set in the image Dockerfile + command: ["/usr/local/bin/my-init-script.sh"] + # Extra arguments append to command defined above + args: [ + "--log-dir=/home/my-app/logs" + "--timeout=60s" + ] + # Run the command as defined above, in the directory "/tmp" + workingDir: "/tmp" + } + } + } +} +``` + +## Configure Replicas + +The `replicas` field in the `workload` instance describes the number of identical copies to run at the same time. It is generally recommended to have multiple replicas in production environments to eliminate any single point of failure. In Kubernetes, this corresponds to the `spec.replicas` field in the relevant workload manifests. + +To configure a workload to have a replica count of 3: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + containers: { + # ... + } + replicas: 3 + # ... + } + # ... +} +``` + +Autoscaling will be supported in a future version of Kusion, at which point you will be able to specify a range of replicas. + +## Differences between Services and Jobs + +The two types of workloads, namely `Service` and `Job`, share a majority of the attributes with some minor differences. 
+ +### Exposure + +A `Service` usually represents a long-running, scalable workload that responds to short-lived latency-sensitive requests and never go down. Hence a `Service` has an additional attribute that determines how it is exposed and can be accessed. A `Job` does NOT the option to be exposed. We will explore more in the [application networking walkthrough](networking). + +### Workload Implementations + +Kusion also supports multiple kinds of Kubernetes workload implementations for a `Service` type workload. The current supported kinds are `Deployment`(default) and `CollaSet`, which is a workload type defined in the [KusionStack-operating toolkit](https://github.com/KusionStack/operating) under the KusionStack family. You can learn more about `CollaSet` [here](https://kusionstack.io/). + +To specify a `CollaSet` kind `Service`: +``` +myapp: ac.AppConfiguration { + workload: wl.Service { + type: "CollaSet" + # ... + } +} +``` + +If `type` is not provided, Kusion defaults to use the Kubernetes `Deployment`. + +### Job Schedule + +A `Job` can be configured to run in a recurring manner. In this case, the job will have a cron-format schedule that represents its recurring schedule. + +To configure a job to run at 21:00 every night: +``` +myapp: wl.Job { + containers: { + # ... + } + schedule: "0 21 * * *" + } +``` + +## Workload References + +You can find workload references [here](../reference/model/catalog_models/workload/doc_service). + +You can find workload schema source [here](https://github.com/KusionStack/catalog/tree/main/models/schema/v1/workload). \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/getting-started/_category_.json b/docs_versioned_docs/version-v0.9/getting-started/_category_.json new file mode 100644 index 00000000..3562d433 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/getting-started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 1 +} diff --git a/docs_versioned_docs/version-v0.9/getting-started/_deliver-first-project.md b/docs_versioned_docs/version-v0.9/getting-started/_deliver-first-project.md new file mode 100644 index 00000000..6672e285 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/getting-started/_deliver-first-project.md @@ -0,0 +1,144 @@ +--- +sidebar_position: 1 +--- + +# Deliver Your First Project on Kubernetes + +This tutorial will demonstrate how to deliver a simple single-app, single-stack project on Kubernetes in one Kusion command. + +## Prerequisites + +Before we start, we need to complete the following steps: + +1、Install Kusion + +We recommend using HomeBrew(Mac), Scoop(Windows), or an installation shell script to manage Kusion installation. +See [Download and Install](install) for more details. + +2、Running Kubernetes cluster + +There must be a running Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. +If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. + +## Init Project + +We can start by initializing this tutorial project with online templates: + +```shell +kusion init --online +``` + +All init templates are listed as follows: + +```shell +~/playground$ kusion init --online +? Please choose a template: [Use arrows to move, type to filter] +> code-city Code City metaphor for visualizing Go source code in 3D. 
+ deployment-multi-stack A minimal kusion project of multiple stacks + deployment-single-stack A minimal kusion project of single stack + wordpress A sample wordpress project +``` + +Select `code-city` and press `Enter`. After that, we will see hints below and use the default value to config this project and stack. + +![](/img/docs/user_docs/getting-started/init-gocity.gif) + +The directory structure looks like the following: + +```shell +cd code-city && tree +``` + +```shell +~/playground$ tree code-city/ +code-city/ +├── dev +│   ├── kcl.mod +│   ├── kcl.mod.lock +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +2 directories, 5 files +``` + +:::info +More details about the directory structure can be found in +[Concepts](../concepts/glossary). +::: + +### Review Config Files + +Let's take a look at the configuration files located at `code-city/dev/main.k`: +```python +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.workload.container as c + +gocity: ac.AppConfiguration { + workload: wl.Service { + containers: { + "gocity": c.Container { + image = "howieyuen/gocity:latest" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 4000 + } + ] + } +} +``` + +The configuration file `main.k` includes an `AppConfiguration` with the name `gocity`. The `gocity` application includes a workload of type `wl.Service`, which runs on 1 replica and exposes port 4000 to be accessed. This model hides the major complexity of Kubernetes resources such as `Namespace`,`Deployment` and `Service`, while providing the concepts that are application-centric and infrastructure-agnostic. + +:::info +More details about the Models can be found in [Catalog](https://github.com/KusionStack/catalog) +::: + +## Delivery + +```shell +cd code-city/dev && kusion apply --watch +``` + +Go to the `dev` folder and we will deliver this App into a Kubernetes cluster with one command `kusion apply --watch` + +![](/img/docs/user_docs/getting-started/apply.gif) + +Check `Deploy` status + +```shell +kubectl -n gocity get deploy +``` + +The expected output is shown as follows: + +```shell +~/playground/code-city/dev$ kubectl -n gocity get deploy +NAME READY UP-TO-DATE AVAILABLE AGE +gocity-dev-gocity 1/1 1 1 3m37s +``` + +Port-forward our App with the `service` + +```shell +kubectl port-forward -n gocity svc/gocity-dev-gocity-private 4000:4000 +``` + +```shell +~/playground/code-city/dev$ kubectl port-forward -n gocity svc/gocity-dev-gocity-private 4000:4000 +Forwarding from 127.0.0.1:4000 -> 4000 +Forwarding from [::1]:4000 -> 4000 +``` + +Visit [http://localhost:4000/#/github.com/KusionStack/kusion](http://localhost:4000/#/github.com/KusionStack/kusion) in your browser and enjoy. + +![](/img/docs/user_docs/getting-started/gocity.png) diff --git a/docs_versioned_docs/version-v0.9/getting-started/_konfig.md b/docs_versioned_docs/version-v0.9/getting-started/_konfig.md new file mode 100644 index 00000000..b059de6a --- /dev/null +++ b/docs_versioned_docs/version-v0.9/getting-started/_konfig.md @@ -0,0 +1,62 @@ +--- +sidebar_position: 9 +--- + +# Kusion 模型库 + +**Kusion 模型库**也叫做 `Kusion Model`,是 KusionStack 中预置的、用 KCL 描述的配置模型,它提供给用户开箱即用、高度抽象的配置接口,模型库最初朴素的出发点就是改善 YAML 用户的效率和体验,我们希望通过将代码更繁杂的模型抽象封装到统一的模型中,从而简化用户侧配置代码的编写。 + +Konfig 仓库地址:https://github.com/KusionStack/konfig + +![](/img/docs/user_docs/getting-started/konfig-arch-01.png) + +## 1. 
目录结构 + +先克隆 Kusion 模型库:`git clone git@github.com:KusionStack/Konfig.git`。 + +Konfig 配置大库整体结构如下: + +```bash +. +├── Makefile # 通过 Makefile 封装常用命令 +├── README.md # 配置大库说明 +├── appops # 应用运维目录,用来放置所有应用的 KCL 运维配置 +│ ├── clickhouse-operator +│ ├── code-city +│ ├── guestbook +│ ├── http-echo +│ └── nginx-example +├── base # Kusion Model 模型库 +│ ├── examples # Kusion Model 样例代码 +│ │ ├── monitoring # 监控配置样例 +│ │ ├── native # Kubernetes 资源配置样例 +│ │ └── server # 云原生应用运维配置模型样例 +│ └── pkg +│ ├── kusion_kubernetes # Kubernetes 底层模型库 +│ ├── kusion_models # 核心模型库 +│ └── kusion_prometheus # Prometheus 底层模型库 +├── hack # 放置一些脚本 +└── kcl.mod # 大库配置文件,通常用来标识大库根目录位置以及大库所需依赖 +``` + +## 2. 测试 Konfig 代码 + +在安装完成 Kusion 工具之后,在 Konfig 根目录执行 `make check-all` 测试大库全部应用(参考 [Konfig](/docs/user_docs/concepts/konfig)),或者执行 `make check-http-echo` 测试 `appops/http-echo` 应用。 + +如果需要单独测试 `appops/http-echo` 应用的 dev 版本,可以进入 `appops/http-echo/dev` 目录执行 `kusion compile` 命令(或者通过更底层的 `kcl -Y kcl.yaml ./ci-test/settings.yaml` 命令),输出的文件在 `appops/http-echo/dev/ci-test/stdout.golden.yaml` 文件。 + +## 3. 添加应用 + +在 [快速开始/Usecases](/docs/user_docs/getting-started/deliver-wordpress.md) 我们已经展示如何快速添加一个应用(参考 [Konfig](/docs/user_docs/concepts/konfig))。 + +## 4. Konfig 架构图 + +之所以用一个大的仓库管理全部的 IaC 配置代码,是由于不同代码包的研发主体不同,会引发出包管理和版本管理的问题,从而导致平台侧需要支持类似编译平台的能力。采用大库模式下,业务配置代码、基础配置代码在一个大库中,因此代码间的版本依赖管理比较简单,平台侧处理也比较简单,定位唯一代码库的目录及文件即可,代码互通,统一管理,便于查找、修改、维护(大库模式也是 Google 等头部互联网公司内部实践的模式)。 + +下面是 Konfig 的架构图: + +![](/img/docs/user_docs/getting-started/konfig-arch-01.png) + +核心模型内部通过前端模型和后端模型两层抽象简化前端用户的配置代码,底层模型则是通过 KCL OpenAPI 工具自动生成。 + +模型的更详细文档可参考 [参考手册/Kusion 模型库](/docs/user_docs/reference/model)。 diff --git a/docs_versioned_docs/version-v0.9/getting-started/_kusion-ide.mdx b/docs_versioned_docs/version-v0.9/getting-started/_kusion-ide.mdx new file mode 100644 index 00000000..2b5f1587 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/getting-started/_kusion-ide.mdx @@ -0,0 +1,91 @@ +--- +sidebar_position: 3 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Use VS Code Kusion + +The [VS Code Kusion extension](https://marketplace.visualstudio.com/items?itemName=KusionStack.kusion) provides convenient operations to deliver KCL configurations to Clouds. + +## Before You Begin + +You could begin with VS Code Kusion using cloud IDE [GitHub Codespaces](https://github.com/features/codespaces), or you could install it on your desktop VS Code. + + + + +Conguratulations! 🎉🎉 There's no environment setting up required! You could directly [create a workspace with bundled Kusion](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=488867056&machine=standardLinux32gb&devcontainer_path=.devcontainer.json). + + + + +**Step 1.** Install Kusion on your system. Please refer to [install kusion](/docs/user_docs/getting-started/install-kusion). + +**Step 2.** Install the Kusion extension for Visual Studio Code. This extension requires the VS Code 1.68+. + +**Step 3.** You need to have a Kubernetes Cluster and place the kubeConfig file at `~/.kube/config`. + + + + +## Get Started to Deliver Your First App + +Here's an example for you to quickly get started to deliver [code city](https://wettel.github.io/codecity.html) application to the clouds with the VS Code Kusion Extension. All the steps could be interactivly experienced in the `Getting started with Kusion` walkthrough within VS Code. + +To find the Kusion walkthrough: Open the Command Palatte > type and select `Welcome: Open Walkthrough...` > then type and select `kusion`. 
+ + +### 1. Abstract: Define Your Models + + + + +The monorepo [konfig](https://github.com/KusionStack/konfig) is already opened in your workspace. It contains classical atractions of application configuration for you to quick start. + + + + +You could clone the monorepo [konfig](https://github.com/KusionStack/konfig) which contains classical atractions of application configuration for you to quick start: + +```shell +git clone https://github.com/KusionStack/konfig.git +``` + +Then open it with your VS Code. + + + + +### 2. Config: New Kusion Project + +You could quickly create a new kusion project from archetype. To do that, click the `Create Kusion Project` button on the walkthrough (or, type `Kusion: Create` in the Command Palatte), and select a project template(For example using the `code-city` template you could deploy an application to visualize software as 3D cities). + +![](/img/docs/user_docs/getting-started/create-project.gif) + +### 3. Preview: YAML Representation + +Now let's preview the YAML representation of our Config previously created by clicking the data preview button or type and select `Kusion: Open Data Preview To the Side`. + +![](/img/docs/user_docs/getting-started/data-preview.gif) + +### 4. Runtime Diff and Go online + +To view the runtime diff of the current stack, you could right-click at the configuraion main file and select `Diff with Runtime and Apply` to open the runtime diff page. + +Then confirm the diff and make the changes go online. + +![](/img/docs/user_docs/getting-started/config-diff-apply.gif) diff --git a/docs_versioned_docs/version-v0.9/getting-started/deliver-wordpress.md b/docs_versioned_docs/version-v0.9/getting-started/deliver-wordpress.md new file mode 100644 index 00000000..e4d93cf7 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/getting-started/deliver-wordpress.md @@ -0,0 +1,170 @@ +--- +sidebar_position: 2 +--- + +# Deliver the WordPress Application on Kubernetes + +In this tutorial we will walk through how to deploy a WordPress application on Kubernetes with Kusion. The WordPress application will interact with MySQL, which is declared as a database accessory in the config codes and will be automatically created and managed by Kusion. + +## Prerequisites + +Before we start to play with this example, we need to have the Kusion CLI installed and run a Kubernetes cluster. Here are some helpful documentations: + +- Install [Kusion CLI](install-kusion) +- Install [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) CLI and run a [Kubernetes](https://kubernetes.io/) cluster. Some light and convenient options for local deployment include [k3s](https://docs.k3s.io/quick-start), [k3d](https://k3d.io/v5.4.4/#installation), and [MiniKube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/). + +## Init Project + +We can start by initializing this tutorial project with online templates: + +```shell +kusion init --online +``` + +All init templates are listed as follows: + +```shell +➜ kusion_playground kusion init --online +? Please choose a template: [Use arrows to move, type to filter] + code-city Code City metaphor for visualizing Go source code in 3D. 
+ deployment-multi-stack A minimal kusion project of multiple stacks + deployment-single-stack A minimal kusion project of single stack +> wordpress A sample wordpress project + wordpress-cloud-rds A sample wordpress project with cloud rds +``` + +Please select `wordpress` and press `Enter`, after which we will see the hints below and use the default value to config this project and stack. + +![](/img/docs/user_docs/getting-started/init-wordpress-with-local-db.gif) + +The directory structure looks like the following: + +```shell +cd wordpress && tree +``` + +```shell +➜ kusion_playground cd wordpress && tree +. +├── dev +│   ├── kcl.mod +│   ├── kcl.mod.lock +│   ├── main.k +│   └── stack.yaml +└── project.yaml + +1 directory, 5 files +➜ wordpress +``` + +:::info +More details about the directory structure can be found in [Concepts](../concepts/glossary). +::: + +### Review Config Files + +Now let's have a glance at the configuration files at `dev/main.k`: + +```python +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.trait as t +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.secret as sec +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.monitoring as m +import catalog.models.schema.v1.accessories.database as db + +# main.k declares reusable configurations for dev stacks. +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 80 + } + ] + } + database: db.Database { + type: "local" + engine: "mysql" + version: "8.0" + } +} +``` + +The configuration file `main.k` includes an `AppConfiguration` with the name of `wordpress`. The `wordpress` application includes a wordload of type `wl.Service`, which runs on 1 replica and exposes `80` to be accessed. Besides, it declares a local `db.Database` accessory with the engine of `mysql:8.0` for the application. The necessary Kubernetes resources for deploying and using the local database will be generated, and users can get the `host address`, `username` and `paasword` through the [magic variables for sensitive database information](../reference/model/naming-conventions#sensitive-database-information) of Kusion in application containers. + +This model hides the major complexity of Kubernetes resources such as `Namespace`, `Deployment` and `Service`, which providing the concepts that are application-centric and infrastructure-agnostic. + +:::info +More details about the Models can be found in [Catalog](https://github.com/KusionStack/catalog) +::: + +## Delivery + +```shell +cd dev && kusion apply --watch +``` + +Go to the `dev` folder and we will deliver the WordPress application into the Kubernetes cluster with one command `kusion apply --watch`. + +![](/img/docs/user_docs/getting-started/apply-wordpress-with-local-db.gif) + +Check `Deployment` status. 
+ +```shell +kubectl -n wordpress get deployment +``` + +The expected output is shown as follows: + +```shell +➜ dev kubectl -n wordpress get deploy +NAME READY UP-TO-DATE AVAILABLE AGE +wordpress-dev-wordpress 1/1 1 1 2m23s +wordpress-db-local-deployment 1/1 1 1 2m23s +``` + +Port-forward our WordPress with the `Service`. + +```shell +kubectl port-forward -n wordpress service/wordpress-dev-wordpress-private 12345:80 +``` + +```shell +➜ dev kubectl port-forward -n wordpress service/wordpress-dev-wordpress-private 12345:80 +Forwarding from 127.0.0.1:12345 -> 80 +Forwarding from [::1]:12345 -> 80 + +``` + +Now we can visit [http://localhost:12345](http://localhost:12345) in our browser and enjoy! + +![](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +## Delete WordPress Application + +We can delete the WordPress application and related database resources using the following command line: + +```shell +kusion destroy --yes +``` + +![](/img/docs/user_docs/getting-started/wordpress-with-local-db-destroy.gif) diff --git a/docs_versioned_docs/version-v0.9/getting-started/getting-started.md b/docs_versioned_docs/version-v0.9/getting-started/getting-started.md new file mode 100644 index 00000000..4ac5f2b3 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/getting-started/getting-started.md @@ -0,0 +1,2 @@ +# Get Started +This section includes a quick overview of KusionStack and how to deploy a cloud-native app with it. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/getting-started/install-kusion.md b/docs_versioned_docs/version-v0.9/getting-started/install-kusion.md new file mode 100644 index 00000000..2663c1d3 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/getting-started/install-kusion.md @@ -0,0 +1,56 @@ +--- +sidebar_position: 1 +sidebar_label: Install Kusion +id: install-kusion +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Install Kusion + +You can install the latest Kusion CLI on MacOS and Linux. Choose the one you prefer from the methods below. + + + + +The recommended method for installing on MacOS and Linux is to use the brew package manager. 
+ +**Install Kusion** + +```bash +brew install KusionStack/tap/kusion +``` + +**Update Kusion** + +```bash +brew upgrade KusionStack/tap/kusion +``` + +**Uninstall Kusion** + +```bash +brew uninstall KusionStack/tap/kusion +``` + +```mdx-code-block + + +``` + +**Install Kusion** + +```bash +curl https://www.kusionstack.io/scripts/install.sh | sh +``` + +**Uninstall Kusion** + +```bash +curl https://www.kusionstack.io/scripts/uninstall.sh | sh +``` + +```mdx-code-block + + +``` diff --git a/docs/user_docs/guides/_category_.json b/docs_versioned_docs/version-v0.9/guides/_category_.json similarity index 100% rename from docs/user_docs/guides/_category_.json rename to docs_versioned_docs/version-v0.9/guides/_category_.json diff --git a/docs_versioned_docs/version-v0.9/guides/_kubevela/_category_.json b/docs_versioned_docs/version-v0.9/guides/_kubevela/_category_.json new file mode 100644 index 00000000..85eb16a0 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/_kubevela/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "KubeVela", + "position": 5 +} diff --git a/docs_versioned_docs/version-v0.9/guides/_kubevela/kusionstack-helps-users-better-use-kubevela.md b/docs_versioned_docs/version-v0.9/guides/_kubevela/kusionstack-helps-users-better-use-kubevela.md new file mode 100644 index 00000000..e73ad52d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/_kubevela/kusionstack-helps-users-better-use-kubevela.md @@ -0,0 +1,351 @@ +--- +sidebar_position: 1 +--- + +# KusionStack Helps Users Better Use KubeVela + +## Introduction + +In recent years, Kubernetes has gradually become the de facto standard for cloud-native technology infrastructure, unifying the orchestration and scheduling of infrastructure resources. However, Kubernetes does not provide an operational model from an application-centric perspective, which results in a higher cognitive cost for application developers, impacts the application management experience, and ultimately reduces development efficiency. To address this issue, the **KubeVela** platform and the **KusionStack** technology stack have emerged. + +KubeVela is an excellent application delivery server solution that can serve as a highly available control plane for the centralized management of scalable applications. As a CNCF (Cloud Native Computing Foundation) incubating project, KubeVela has garnered significant attention and participation from a large number of users and developers due to its novel technical concepts and relatively high completion. + +KusionStack is an **application-centric** delivery and operation technology stack that acts as a client entry point for writing and applying application configurations. It enables configuration model abstraction and complexity reduction, configuration field type checking and validation, as well as unified orchestration of Kubernetes and cloud resources. As a result, KubeVela users can benefit from a better experience in areas like hybrid resource operation, team collaboration, and shift-left security. + +## KubeVela + +KubeVela is an out-of-the-box modern application delivery and management platform built on top of Kubernetes clusters, various cloud platforms, and diverse IoT devices. It achieves application delivery and management in hybrid and multi-cloud environments through the **Open Application Model (OAM)**, a modular, scalable, and portable cloud-native application abstraction standard. 
+ +![KubeVela usage example](/img/docs/user_docs/guides/kubevela/kubevela_usage_example.png) + +KubeVela translates all the components required for application deployment and various operational traits into an infrastructure-agnostic deployment plan. As a control plane application delivery engine, KubeVela is primarily integrated into PaaS (Platform as a Service) platforms in the form of a Kubernetes Custom Resource Definition (CRD) controller. Users can achieve standardized application delivery by writing **KubeVela Application** resource YAML files. A typical KubeVela Application is shown below. + +![KubeVela application example](/img/docs/user_docs/guides/kubevela/kubevela_application_example.png) + +## KusionStack + +KusionStack is an open-source programmable engineering stack to facilitate app-centric development and unified operation, inspired by the phrase **'Fusion on the Kubernetes'**. The main motivation is to help platform and app developers to develop and deliver in a self-serviceable, fast, reliable, and collaborative way. + +KusionStack consists of a series of tools and products. Among them, KCL provides programmability similar to modern programming languages, Kusion turns blueprints into reality with powerful engines and orchestration capabilities, and Konfig holds app delivery models and components. Users can choose to use one of them, such as KCL, or use them in combination. + +- **KCL**: Configuration and policy programming language for application developers, along with its protocols, toolchains, and IDE plugins +- **Kusion**: Operation engine, toolchains, service, IDE workspace +- **Konfig**: Shared repository of application models and components, and CI suite for GitOps workflows (such as GitHub Actions suite) + +![KusionStack workflow example](/img/docs/user_docs/guides/kubevela/kusionstack_workflow_example.png) + +## KusionStack Helps Users Better Use KubeVela + +In enterprise-level scenarios such as **large-scale application configuration management**, different teams (infrastructure, platform, business) with different roles (developers, SREs) may simultaneously have unified delivery and differentiated cloud resource management requirements for hundreds or even thousands of applications. In such situations, challenges arise on how to achieve better multi-team, multi-role collaboration and how to manage hybrid resources with applications in a unified way. + +KusionStack, when combined with KubeVela, serves as an operational technology carrier that helps users enhance the efficiency and security of application delivery and management in large-scale environments. It facilitates a seamless experience in addressing the complexities of coordinating multiple teams and roles while managing applications at the center of hybrid resource unification. + +### Application-centric Unified Resource Management + +KubeVela offers rich extensible mechanisms, supporting users to create Terraform-like component definitions (ComponentDefinition). Users can declare HCL (HashiCorp Configuration Language) code in YAML files describing their applications to manage resources in multi-cloud and hybrid environments. However, **writing HCL code in YAML may lead to a somewhat fragmented user experience**. + +On the other hand, KusionStack uses KCL (Kusion Configuration Language) to provide **a unified description of application-dependent resources**. Developers can declare both Kubernetes resources and IaaS (Infrastructure as a Service) cloud resources (such as VPC, OSS, RDS, etc.) 
in a single configuration file, simplifying cognitive overhead and improving the operational experience. + +![Unified Description of Application's K8s & IaaS Cloud Resource Configuration Example in a KCL File](/img/docs/user_docs/guides/kubevela/unified_description_in_kcl.png) + +### Multi-team and Multi-role Collaboration + +KusionStack and KubeVela are both systems designed for the separation of concerns, where users are naturally divided into two roles: **Developer** and **Platform**. Different roles can focus solely on their concerned configuration items to improve development and operational efficiency. + +However, in large-scale application delivery scenarios, KubeVela Application YAML files and related Custom Resource Definitions (CRDs) for each application are often scattered across different locations, making it challenging to manage and collaborate effectively. KusionStack addresses this issue by adopting the Konfig repository pattern, providing support for large-scale collaboration. All team members can collaborate on writing and auditing application configuration code in the same Git repository. Additionally, KusionStack utilizes KCL for unified operational intent descriptions, further enhancing collaboration efficiency. + +Moreover, KusionStack simplifies the writing of configuration code for developers by encapsulating various complex application models into a unified frontend + backend model. The frontend model serves as the user interface for developers to write configuration code and contains all configurable properties exposed by the platform, while abstracting away redundant and deducible configurations, presenting only the necessary attributes for user interaction. The backend model is the concrete implementation of the model, responsible for applying the properties defined in the frontend model. It includes resource rendering, compliance validation, and other logical fragments, improving the robustness and reusability of configuration code without requiring developers to be aware of the underlying complexity. + +![Developer & Platform Collaboration Example](/img/docs/user_docs/guides/kubevela/developer_and_platform_collab_example.png) + +### Shift-left Security + +When declaring KubeVela Applications, KubeVela users utilize YAML plaintext, which lacks client-side validation capabilities for configuration fields such as type, value constraints, and security compliance policies. + +KusionStack, on the other hand, employs a **Policy as Code (PaC)** mechanism, allowing for more convenient detection of potential issues during the writing of application configurations. This enables identifying problems before the CR (Custom Resource) is applied, reducing the risks associated with erroneous configurations taking effect. Additionally, the Kusion engine provides a three-way real-time diff comparison capability before the configuration takes effect, allowing users to preview configuration changes and thereby offering a safer workflow. + +![KCL Code Validation Example](/img/docs/user_docs/guides/kubevela/shift_left_security_example.png) + +![3-Way Real-time Diff Example](/img/docs/user_docs/guides/kubevela/3_way_diff_example.png) + +## The Integration Solution of KusionStack and KubeVela + +The proposed solution for integrating KubeVela with KusionStack is illustrated in the following diagram. 
In this setup, KusionStack serves as the client side for application delivery and management, providing a unified abstraction of application configuration models and unified orchestration of Kubernetes and cloud resources. Additionally, it supports multi-team & multi-role collaboration and configuration code risk validation capabilities. On the other hand, KubeVela acts as the control plane backend, responsible for the deployment of Kubernetes resources, while Terraform handles the management of cloud resources. + +![Workflow of KubeVela Integrated with KusionStack](/img/docs/user_docs/guides/kubevela/integration_solution.png) + +The corresponding workflow is as follows. + +- Users utilize KCL to write the Konfig application configuration model, which can include a mix of Kubernetes and Terraform resources. + +- Kubernetes resources in the application model instance will be rendered as KubeVela Application CRs (Custom Resources), while Terraform resources will be rendered as configuration data in Kusion Spec format. + +- Users deliver the KubeVela Application instances to KubeVela using the Kusion command-line tool, and the Terraform resources are delivered to Terraform to complete the resource deployment. + +## Practical Example: Deliver WordPress Application to Kubernetes and Clouds + +Here is an example of delivering a WordPress application to Kubernetes and the cloud using KusionStack in conjunction with KubeVela. WordPress is an open-source content management system (CMS) that can be used to create and manage various types of websites and handle multiple users and roles. In our practical example, the WordPress application will rely on AWS Relational Database Service (RDS) to provide a cloud-based database solution for managing WordPress site content, including articles, pages, comments, and user information. + +### Prerequisites + +- [Install Kusion](/docs/user_docs/getting-started/install-kusion) +- [Install KubeVela](https://kubevela.io/) +- [Deploy Kubernetes](https://kubernetes.io/) or [Kind](https://kind.sigs.k8s.io/) +- [Install Terraform](https://www.terraform.io/) +- Prepare an AWS account and create a user with the permissions for `AmazonVPCFullAccess` and `AmazonRDSFullAccess` to utilize the Amazon Relational Database Service (RDS). This user can create and manage resources in the AWS Identity and Access Management (IAM) console + +![aws iam account](/img/docs/user_docs/guides/kubevela/aws_iam_account.png) + +Additionally, we also need to configure the obtained AccessKey and SecretKey as environment variables: + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +``` + +:::info +Alternatively, Kusion provides a **Secret as Code** solution for handling the AccessKey and SecretKey mentioned above. +::: + +### Review Project Structure and Config Codes + +#### Project Structure + +Firstly, let's clone the Konfig repo and enter the root directory: + +```shell +git clone git@github.com:KusionStack/konfig.git && cd konfig +``` + +Then we can locate the `wordpress-kubevela` project under the `appops/` directory, which is composed of the following files: + +```shell +cd appops/wordpress-kubevela && tree +. 
+├── README.md // Documentation +├── base // Common configuration for all stacks +│ └── base.k // Common config code file for all stacks +├── dev // Stack directory +│ ├── ci-test // CI test directory, storing test scripts and data +│ │ ├── settings.yaml // Configuration for test data and compiling +│ │ └── stdout.golden.yaml // Expected Spec YAML, which can be updated using make +│ ├── kcl.yaml // Multi-file compilation configuration for current stack +│ ├── main.k // Config codes for Developer in current stack +│ ├── platform.k // Config codes for Platform in current stack +│ └── stack.yaml // Meta information of current stack +└── project.yaml // Meta information of current project + +3 directories, 9 files +``` + +:::info +More details about the directory structure can be found in [Konfig](/docs/user_docs/concepts/glossary) +::: + +#### Config Codes + +The configuration code files we need to pay attention to mainly include `dev/main.k` and `dev/platform.k`. + +```python +# dev/main.k +import base.pkg.kusion_models.kube.frontend +import base.pkg.kusion_models.kube.frontend.storage + +# The application configuration in stack will overwrite +# the configuration with the same attribute in base. +# And main.k is for the configurations in concern of application developers. + +# defination of wordpress application frontend model +wordpress: frontend.Server { + # specify application image + image = "wordpress:4.8-apache" + + # use cloud database for the storage of wordpress + database = storage.DataBase { + # choose aws_rds as the cloud database + dataBaseType = "aws_rds" + dataBaseAttr = storage.DBAttr { + # choose the engine type and version of the database + databaseEngine = "MySQL" + databaseEngineVersion = "5.7" + # create database account + databaseAccountName = "root" + databaseAccountPassword = option("db_password") + # create internet access for the cloud database + internetAccess = True + } + } + + # NOTE: this configuration is an example of adding an environment variable in the main container + # uncommenting and re-deploying will add the environment variable "ENV_ADD_EXAMPLE" in the + # main container and the differnces will be shown by the command of "kusion apply" + mainContainer: { + env += [ + { + name = "ENV_ADD_EXAMPLE" + value = "wordpress-example" + } + ] + } +} +``` + +```python +# dev/platform.k +import base.pkg.kusion_models.kube.frontend +import base.pkg.kusion_models.kube.frontend.storage +import base.pkg.kusion_models.kube.metadata +import base.pkg.kusion_clouds.aws_backend.aws_config + +# The application configuration in stack will overwrite +# the configuration with the same attribute in base. +# And platform.k is for the configurations in concern of platform developers. + +_cloudResourceName = "{}-{}".format(metadata.__META_APP_NAME, metadata.__META_ENV_TYPE_NAME).lower() +_awsDependencyPrefix = "$kusion_path." 
+ aws_config.awsProvider.namespace + ":" + aws_config.awsProvider.name + ":" + +# defination of wordpress application frontend model +wordpress: frontend.Server { + # add environment variable in main container with implicit alicloud resource dependency + mainContainer: { + env += [ + { + name = "WORDPRESS_DB_HOST" + value = _awsDependencyPrefix + aws_config.awsDBInstanceMeta.type + ":" + _cloudResourceName + ".address" + } + ] + } + + # supplement database related configuration code on the platform side + database: storage.DataBase { + dataBaseAttr: storage.DBAttr { + # specify instance type for aws or alicloud rds + # for aws rds + instanceType = "db.t3.micro" + + # for alicloud rds + # instanceType = "mysql.n2.serverless.1c" + + # specify cloud charge type for alicloud rds + # extraMap = { + # "cloudChargeType": "Serverless" + # } + } + } +} +``` + +Specifically: + +- `dev/main.k` contains config codes for **Developer** to concentrate on for the WordPress application deployment in the dev environment. In addition to the application container image, it also assigns an instance of a type `storage.DataBase` to the `frontend.Server.database` attribute, and thus declaring an RDS with MySQL as the database engine. +- `dev/platform.k` contains config codes for **Platform** to concentrate on for the WordPress application deployment in the dev environment. Here, the main purpose is to specify the domain name of the cloud database to be connected to for the WordPress application container and the RDS instance type. In addition, we can also declare the RDS charging type and other configurations in `dev/platform.k`. + +As shown above, benefiting from the advanced features of KCL concerning variable, function, and schema definition, we can abstract and encapsulate the RDS resources, which shields many properties that the Developer does not need to be aware of (such as the network segment of VPC and vSwitch behind RDS). The developer only needs to fill in a few necessary fields in the frontend model instance to complete the declaration of RDS resources, so that the application configuration can be organized more flexibly and efficiently. Moreover, under the collaboration of writing config codes in the Konfig repository, Developers and Platforms from different teams can perform their roles, only focusing on their own respective configuration items, thereby improving the collaboration efficiency of application development and operation. + +### Deliver WordPress Application + +We can complete the delivery of the WordPress application using the following command line: + +```shell +cd appops/wordpress-kubevela/dev && kusion apply --yes +``` + +![apply the wordpress application with aws rds](/img/docs/user_docs/guides/kubevela/kusion_apply.png) + +After all the resources reconciled, we can port-forward our local port (e.g. 12345) to the WordPress frontend service port (80) in the cluster: + +```shell +kubectl port-forward -n wordpress-kubevela svc/wordpress 12345:80 +``` + +![kubectl port-forward for wordpress](/img/docs/user_docs/guides/kubevela/port_forward.png) + +### Verify WordPress Application + +Next, we will verify the WordPress site service we just delivered, along with the creation of the RDS instance it depends on. We can start using the WordPress site by accessing the link of local-forwarded port [(http://localhost:12345)](http://localhost:12345) we just configured in the browser. 
+ +![wordpress site page](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +We can use `velaux` or `vela top` commands to view the KubeVela Application-related resources. + +```shell +vela port-forward addon-velaux -n vela-system 8080:80 +``` + +![velaux](/img/docs/user_docs/guides/kubevela/velaux.png) + +```shell +vela top +``` + +![vela top](/img/docs/user_docs/guides/kubevela/vela_top.png) + +In addition, we can also log in to the cloud service console page to view the RDS instance we just created. + +![rds info](/img/docs/user_docs/guides/kubevela/rds_info.png) +![rds detailed](/img/docs/user_docs/guides/kubevela/rds_detailed.png) + +### Modify WordPress Application + +#### Compliance Check of Config Code Modification + +Using KCL to write application config codes naturally has the ability to perform type-checking on configuration items. Validation logic can also be implemented through keywords like `assert` and `check`, making it more convenient to discover potential issues during the writing of application config codes and reduce the risk of delivering the application with the wrong configuration. + +When creating an RDS resource, for different types of cloud service vendors, we can declare the corresponding RDS instance type, and the Konfig backend model has added the validation logic for the RDS instance type through the `assert` keyword, so when we accidentally modify the RDS instance type to an invalid value in `dev/platform.k`, Kusion will throw a corresponding error during the compilation process before applying the resource. + +![KCL Assertion](/img/docs/user_docs/guides/kubevela/assert.png) + +#### Apply Config Code Modification + +As shown below, we can try to modify the config codes in the file `dev/main.k` to add an environment variable in the main container of the WordPress application. When using `kusion apply` to apply the config code modification, we can preview the resource changes with the capability of 3-way real-time diff of Kusion (note that we ignore the change of `metadata.managedFields` field in the following example for better demonstration). + +```python +# dev/main.k +# ... + +wordpress: frontend.Server { + # Some unchanged codes are omitted + # image = ... + # database = storage.DataBase{...} + + # NOTE: this configuration is an example of adding an environment variable in the main container + # uncommenting and re-deploying will add the environment variable "ENV_ADD_EXAMPLE" in the + # main container and the differnces will be shown by the command of "kusion apply" + mainContainer: { + env += [ + { + name = "ENV_ADD_EXAMPLE" + value = "wordpress-example" + } + ] + } +} +``` + +```shell +kusion apply --ignore-fields "metadata.managedFields" +``` + +![kusion apply diff](/img/docs/user_docs/guides/kubevela/kusion_apply_diff.png) + +### Delete WordPress Application + +We can delete the WordPress application and related RDS resources using the following command line. + +```shell +kusion destroy --yes +``` + +![kusion destroy](/img/docs/user_docs/guides/kubevela/kusion_destroy.png) + +## Summary + +This article introduces how KusionStack empowers developers and platform users to achieve efficient application delivery management using KubeVela. In the proposed integration solution, KusionStack serves as a unified abstraction and orchestration layer for hybrid application resource models, while KubeVela handles the activation of Kubernetes resources, and Terraform manages cloud resources. 
+ +From the practical example of rapidly deploying WordPress to the cloud, we can observe the following features that KusionStack offers: + +- **Application-centric unified resource management**: Using KCL, developers can abstract and encapsulate Kubernetes and IaaS cloud resources required by the application, hiding properties that developers don't need to be aware of. This approach enables more freedom, flexibility, and efficiency in organizing application configurations and easily launching all dependencies, achieving an application-centric operations approach. + +- **Multi-team and multi-role collaboration**: By collaboratively writing configuration code in the Konfig repository, developers and platform users can focus on their respective responsibilities, enhancing collaboration efficiency between different roles and improving application development and operational cooperation. + +- **Shift-left compliance check**: Writing application configuration code with KCL allows for the detection of potential issues during the writing phase, reducing the risks associated with erroneous configurations taking effect. Additionally, Kusion offers a three-way real-time diff comparison capability before configuration activation, allowing users to preview configuration changes and providing a safer workflow. diff --git a/docs/user_docs/guides/adopting/_category_.json b/docs_versioned_docs/version-v0.9/guides/adopting/_category_.json similarity index 100% rename from docs/user_docs/guides/adopting/_category_.json rename to docs_versioned_docs/version-v0.9/guides/adopting/_category_.json diff --git a/docs_versioned_docs/version-v0.9/guides/argocd/_category_.json b/docs_versioned_docs/version-v0.9/guides/argocd/_category_.json new file mode 100644 index 00000000..a08bf626 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/argocd/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "ArgoCD", + "position": 2 +} diff --git a/docs_versioned_docs/version-v0.9/guides/cloud-resources/_category_.json b/docs_versioned_docs/version-v0.9/guides/cloud-resources/_category_.json new file mode 100644 index 00000000..cbd33ef5 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/cloud-resources/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Cloud Resources", + "position": 1 +} diff --git a/docs_versioned_docs/version-v0.9/guides/cloud-resources/database.md b/docs_versioned_docs/version-v0.9/guides/cloud-resources/database.md new file mode 100644 index 00000000..384fbab5 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/cloud-resources/database.md @@ -0,0 +1,330 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Deliver the WordPress Application with Cloud RDS + +This tutorial will demonstrate how to deploy a WordPress application with Kusion, which relies on both Kubernetes and IaaS resources provided by cloud vendors. We can learn how to declare the Relational Database Service (RDS) to provide a cloud-based database solution for our application from this article. + +## Prerequisites + +- [Install Kusion](../../getting-started/install-kusion) +- [Deploy Kubernetes](https://kubernetes.io/) or [Kind](https://kind.sigs.k8s.io/) or [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) +- [Install Terraform](https://www.terraform.io/) +- Prepare a cloud service account and create a user with `VPCFullAccess` and `RDSFullAccess` permissions to use the Relational Database Service (RDS). 
This kind of user can be created and managed in the Identity and Access Management (IAM) console +- The environment that executes `kusion` need to have connectivity to terraform registry to download the terraform providers + +Additionally, we also need to configure the obtained AccessKey and SecretKey as environment variables, along with the region if you are using certain cloud provider: + + + + +```bash +export AWS_ACCESS_KEY_ID="AKIAQZDxxxx" # replace it with your AccessKey +export AWS_SECRET_ACCESS_KEY="oE/xxxx" # replace it with your SecretKey +export AWS_PROVIDER_REGION="xx-xxxx-x" # replace it with your AWS Region +``` + +![aws iam account](/img/docs/user_docs/getting-started/aws-iam-account.png) + +```mdx-code-block + + +``` + +```bash +export ALICLOUD_ACCESS_KEY="LTAI5txxx" # replace it with your AccessKey +export ALICLOUD_SECRET_KEY="nxuowIxxx" # replace it with your SecretKey +export ALICLOUD_PROVIDER_REGION="xx-xxxxxxx" # replace it with your AliCloud Region +``` + +![alicloud iam account](/img/docs/user_docs/getting-started/set-rds-access.png) + +```mdx-code-block + + +``` + +## Init Project + +We can start by initializing this tutorial project with online templates: + +```shell +kusion init --online +``` + +All init templates are listed as follows: + +```shell +➜ kusion_playground kusion init --online +? Please choose a template: wordpress-cloud-rds A sample wordpress project with cloud rds +This command will walk you through creating a new kusion project. + +Enter a value or leave blank to accept the (default), and press . +Press ^C at any time to quit. + +Project Config: +? Project Name: wordpress-cloud-rds +? AppName: wordpress +? ProjectName: wordpress +Stack Config: dev +? Image: wordpress:6.3 +Created project 'wordpress-cloud-rds' +``` + +Select `wordpress-cloud-rds` and press `Enter`. After that, we will see hints below and use the default value to config this project and stack. + + +![](/img/docs/user_docs/getting-started/init-wordpress-with-rds.gif) + +The directory structure looks like the following: + +```shell +➜ kusion_playground tree wordpress-cloud-rds +wordpress-cloud-rds +├── dev +│   ├── kcl.mod +│   ├── kcl.mod.lock +│   ├── main.k +│   ├── platform.k +│   └── stack.yaml +└── project.yaml + +1 directory, 6 files +``` + +:::info +More details about the directory structure can be found in +[Concepts](../../concepts/glossary). +::: + +### Review Config Files + +Let's take a look at the configuration files located in `wordpress-cloud-rds/dev`. + +`dev/main.k` +```python +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.trait as t +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.secret as sec +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.monitoring as m +import catalog.models.schema.v1.accessories.database as db + +# main.k declares reusable configurations for dev stacks. 
+wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 80 + } + ] + } + database: db.Database { + type: "aws" + engine: "MySQL" + version: "8.0" + size: 20 + instanceType: "db.t3.micro" + } +} +``` + +`dev/platform.k` +```python +import catalog.models.schema.v1 as ac + +# platform.k declares customized configurations +wordpress: ac.AppConfiguration { + database: { + privateRouting = False + + # SubnetID defines the virtual subnet ID associated with the VPC that the rds + # instance will be created in. The rds instance won't be created in user's own VPC + # but in default VPC of cloud vendor, if this field is not provided. + # subnetID = [your-subnet-id] + + # category = "serverless_basic" + } +} +``` + +The template project defaults to use `AWS` RDS instance for the WordPress application. If you would like to try creating the `Alicloud` RDS instance, you could modify the `dev/main.k` and `dev/platform.k` to the following and replace the `[your-subnet-id]` in `dev/platform.k` with the `vSwitchID` to provision the database in. + +`dev/main.k` +```python +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.trait as t +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.secret as sec +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.monitoring as m +import catalog.models.schema.v1.accessories.database as db + +# main.k declares reusable configurations for dev stacks. +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + wordpress: c.Container { + image = "wordpress:6.3" + env: { + "WORDPRESS_DB_HOST": "$(KUSION_DB_HOST)" + "WORDPRESS_DB_USER": "$(KUSION_DB_USERNAME)" + "WORDPRESS_DB_PASSWORD": "$(KUSION_DB_PASSWORD)" + "WORDPRESS_DB_NAME": "mysql" + } + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + port: 80 + } + ] + } + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "8.0" + size: 20 + instanceType: "mysql.n2.serverless.1c" + } +} +``` + +`dev/platform.k` +```python +import catalog.models.schema.v1 as ac + +# platform.k declares customized configurations +wordpress: ac.AppConfiguration { + database: { + privateRouting = False + + # SubnetID defines the virtual subnet ID associated with the VPC that the rds + # instance will be created in. The rds instance won't be created in user's own VPC + # but in default VPC of cloud vendor, if this field is not provided. + subnetID = [your-subnet-id] + + category = "serverless_basic" + } +} +``` + +The configuration code files you need to pay attention are mainly `dev/main.k` and `dev/platform.k`. Specifically: + +- `dev/main.k` contains configurations for the **Development team** to fill out on how the application should run in the dev environment. In addition to declaring its application container attributes such as image, environment variables, resource requirements, its network attributes such as opening the port 80 to provide service, it also declares that it needs an RDS instance. 
+- `dev/platform.k` contains config codes for **Platform team** to fill out for the WordPress application deployment in the dev environment on AliCloud. Here, the main purpose is to specify the details on the cloud database (such as how network access is managed, what category of service to get from the cloud vendor) for it to be connected from the WordPress application container. + +As shown above, benefiting from the advanced features of KCL concerning variable, function, and schema definition, we can abstract and encapsulate the RDS resources, which shields many properties that the Developer does not need to be aware of. The developer only needs to fill in a few necessary fields in the AppConfiguration instance to complete the declaration of RDS resources, so that the application configuration can be organized more flexibly and efficiently. + +:::info +More details about Catalog models can be found in [Catalog](https://github.com/KusionStack/catalog) +::: + +## Deliver WordPress Application + +You can complete the delivery of the WordPress application using the following command line: + +```shell +cd wordpress-cloud-rds && kusion apply --yes +``` + + + + +![apply the wordpress application with aws rds](/img/docs/user_docs/getting-started/apply-wordpress-application-with-aws-rds.png) + +```mdx-code-block + + +``` + +![apply the wordpress application with alicloud rds](/img/docs/user_docs/getting-started/apply-wordpress-application.png) + +```mdx-code-block + + +``` + +After all the resources reconciled, we can port-forward our local port (e.g. 12345) to the WordPress frontend service port (80) in the cluster: + +```shell +kubectl port-forward -n wordpress svc/wordpress-dev-wordpress-private 12345:80 +``` + +![kubectl port-forward for wordpress](/img/docs/user_docs/getting-started/wordpress-port-forward.png) + +## Verify WordPress Application + +Next, we will verify the WordPress site service we just delivered, along with the creation of the RDS instance it depends on. We can start using the WordPress site by accessing the link of local-forwarded port [(http://localhost:12345)](http://localhost:12345) we just configured in the browser. + +![wordpress site page](/img/docs/user_docs/getting-started/wordpress-site-page.png) + +In addition, we can also log in to the cloud service console page to view the RDS instance we just created. + + + + +![aws rds instance](/img/docs/user_docs/getting-started/aws-rds-instance.png) +![aws rds instance detailed information](/img/docs/user_docs/getting-started/aws-rds-instance-detail.png) + +```mdx-code-block + + +``` + +![alicloud rds instance](/img/docs/user_docs/getting-started/alicloud-rds-instance.png) + +```mdx-code-block + + +``` + +## Delete WordPress Application + +You can delete the WordPress application and related RDS resources using the following command line. 
+ +```shell +kusion destroy --yes +``` + + + + +![kusion destroy wordpress with aws rds](/img/docs/user_docs/getting-started/kusion-destroy-wordpress-with-aws-rds.png) + +```mdx-code-block + + +``` + +![kusion destroy wordpress with alicloud rds](/img/docs/user_docs/getting-started/kusion-destroy-wordpress.png) + +```mdx-code-block + + \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/guides/cloud-resources/expose-service.md b/docs_versioned_docs/version-v0.9/guides/cloud-resources/expose-service.md new file mode 100644 index 00000000..dbce02fe --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/cloud-resources/expose-service.md @@ -0,0 +1,122 @@ +# Expose Application Service Deployed on CSP Kubernetes + +Deploying application on the Kuberentes provided by CSP (Cloud Service Provider) is convenient and reliable, which is adopted by many individuals and enterprises. Kusion has a good integration with CSP Kuberentes service. You can deploy your application to the Kubernetes cluster, and expose the service in a quite easy way. + +This tutorial demonstrates how to expose service of the application deployed on CSP Kubernetes. In this article, *[exposing the service of nginx](https://github.com/KusionStack/konfig/blob/main/example/nginx/dev/main.k) (referred to "the example" in the below)* is given as an example. + +## Prerequisites + +Create a Kubernetes cluster, the following CSP Kubernetes services are supported. + +- [Alibaba Cloud Container Service for Kubernetes (ACK)](https://www.alibabacloud.com/product/kubernetes) +- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks). + +After creating the cluster, get the kube-config and export it, so that Kusion can access the cluster. + +```bash +export KUBE_CONFIG="" +``` + +## Expose Service Publicly + +If you want the application can be accessed from outside the cluster, you should expose the service publicly. Follow the steps below, you will simply hit the goal. + +### Write Configuration Code + +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.network as n + +nginx: ac.AppConfiguration { + workload: wl.Service { + containers: { + nginx: c.Container { + image = "nginx:1.25.2" + resources: { + "cpu": "500m" + "memory": "512Mi" + } + } + } + replicas: 1 + ports: [ + n.Port { + type: "aliyun" + port: 80 + protocol: "TCP" + public: True + } + ] + } +} +``` + +The code shown above describes how to expose service publicly of the example on ACK. Kusion use schema `Port` to describe the network configuration, the primary fields of Port are as follows: + +- type: the CSP providing Kubernetes service, support `aliyun` and `aws` +- port: port number to expose service +- protocol: protocol to expose service, support `TCP` and `UDP` +- public: whether to public the service + +To public the service, you should assign the `type` (aliyun for ACK, aws for EKS), and set `public` as True. Besides, schema `Service` should be used to describe the workload configuration. + +That's all what you need to configure! Next, preview and apply the configuration, the application will get deployed and exposed publicly. + +:::info +Kusion uses Load Balancer (LB) provided by the CSP to expose service publicly. 
For more detailed network configuration, please refer to [Application Networking](../../config-walkthrough/networking).
+:::
+
+### Preview and Apply
+
+Execute `kusion preview` under the stack path, and you will see what will be created in the real infrastructure. The picture below gives the preview result of the example. A Namespace, Service and Deployment will be created, which meets the expectation. The Service name has the suffix `public`, which shows that it can be accessed publicly.
+
+![preview-public](/img/docs/user_docs/cloud-resources/expose-service/preview-public.png)
+
+Then, execute `kusion apply --yes` to do the real deployment job. With just one command and a few minutes, you have deployed the application and exposed it publicly.
+
+![apply-public](/img/docs/user_docs/cloud-resources/expose-service/apply-public.png)
+
+### Verify Accessibility
+
+In the example, a Kubernetes Namespace named nginx, together with a Service and a Deployment under that Namespace, should be created. Checking with `kubectl get`, the Service of type `LoadBalancer` and the Deployment are indeed created. The Service has the `EXTERNAL-IP` 106.5.190.109, which means it can be accessed from outside the cluster.
+
+![k8s-resource-public](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png)
+
+Visiting the `EXTERNAL-IP` in a browser returns the correct result, which shows that the service has been exposed publicly.
+
+![result-public](/img/docs/user_docs/cloud-resources/expose-service/result-public.png)
+
+## Expose Service Inside Cluster
+
+If you only need the application to be accessible inside the cluster, just configure `public` as False in the schema `Port`.
+
+```
+import catalog.models.schema.v1 as ac
+import catalog.models.schema.v1.workload as wl
+import catalog.models.schema.v1.workload.network as n
+
+nginx: ac.AppConfiguration {
+    workload: wl.Service {
+        ...
+        ports: [
+            n.Port {
+                ...
+                public: False
+            }
+        ]
+    }
+}
+```
+
+Execute `kusion apply --yes`, and the generated Service has the suffix `private`.
+
+![apply-private](/img/docs/user_docs/cloud-resources/expose-service/apply-private.png)
+
+The Service type is `ClusterIP`, with only a `CLUSTER-IP` and no `EXTERNAL-IP`, which means it cannot be accessed from outside the cluster.
+
+![k8s-resource-private](/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png)
+
+## Summary
+This tutorial demonstrates how to expose the service of an application deployed on CSP Kubernetes. By configuring the schema `Port`, Kusion enables you to expose services simply and efficiently.
diff --git a/docs_versioned_docs/version-v0.9/guides/github-actions/_category_.json b/docs_versioned_docs/version-v0.9/guides/github-actions/_category_.json
new file mode 100644
index 00000000..2f68effc
--- /dev/null
+++ b/docs_versioned_docs/version-v0.9/guides/github-actions/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "GitHub Actions",
+  "position": 3
+}
diff --git a/docs_versioned_docs/version-v0.9/guides/github-actions/deploy-application-securely-and-efficiently-via-github-actions.md b/docs_versioned_docs/version-v0.9/guides/github-actions/deploy-application-securely-and-efficiently-via-github-actions.md
new file mode 100644
index 00000000..e49ed770
--- /dev/null
+++ b/docs_versioned_docs/version-v0.9/guides/github-actions/deploy-application-securely-and-efficiently-via-github-actions.md
@@ -0,0 +1,82 @@
+---
+sidebar_position: 1
+---
+
+# Deploy Application Securely and Efficiently via GitHub Actions
+
+This document provides the instructions to deploy your application securely and efficiently via GitHub Actions.
+
+Using a git repository is a very reliable and common way to manage code, and the same goes for Kusion-managed configuration code. [GitHub Actions](https://docs.github.com/en/actions) is a CI/CD platform. By customizing a [GitHub Actions workflow](https://docs.github.com/en/actions/using-workflows/about-workflows), pipelines such as building, testing, and deploying will be executed automatically.
+
+Kusion has a solid integration with GitHub Actions. You can use GitHub Actions to test configuration correctness, preview changes, and deploy applications. This tutorial demonstrates how to deploy and operate an application through GitHub Actions.
+
+## GitHub Actions Workflow
+
+[KusionStack/konfig](https://github.com/KusionStack/konfig) is the official example repository and provides the GitHub Actions workflow [main.yml](https://github.com/KusionStack/konfig/blob/main/.github/workflows/main.yml). The main.yml workflow is triggered by a push or a pull request on the main branch and includes multiple jobs, which ensure the reliability of the configuration code and deploy the changed application.
+
+![workflow](/img/docs/user_docs/guides/github-actions/workflow.png)
+
+The workflow to deploy an application is shown above, which includes the following jobs:
+
+- Get changed project and stack
+- Check project and stack structure
+- Test code correctness
+- Preview changed stack
+- Apply changed stack
+
+These jobs ensure the security and efficiency of the application deployment. Next, this tutorial will introduce the usage and function of these jobs. To show how they work more concretely, *[updating port configuration of multi-stack](https://github.com/KusionStack/konfig/actions/runs/6325390734) (referred to as "the example" below)* is used as an example.
+
+## Get Changed Project and Stack
+
+As Kusion organizes code by **project** and **stack**, the first step towards deploying the affected applications is analyzing the changed projects and stacks.
+
+The job [get-changed-project-stack](https://github.com/KusionStack/konfig/blob/main/.github/workflows/main.yml#L10) accomplishes this analysis. The main steps are as follows:
+
+- Obtain the list of changed files through `git diff`;
+- Based on the changed file list, obtain the changed projects and stacks, which are indicated by `project.yaml` and `stack.yaml` respectively (a minimal sketch of this idea is shown below).
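+
+The following is a minimal, hypothetical sketch of that detection idea. It is not the logic used in `main.yml`: the diff range and the mapping from changed files to stacks are simplified assumptions. It only shows how `project.yaml` and `stack.yaml` markers can be located from a changed-file list:
+
+```bash
+#!/usr/bin/env bash
+# Minimal sketch (not the real workflow logic): detect changed stacks and
+# projects from the files touched by the latest commit.
+# Assumption: the diff range HEAD^..HEAD; the real workflow compares against the base branch.
+
+git diff --name-only HEAD^ HEAD | while read -r changed_file; do
+  dir=$(dirname "$changed_file")
+  # Walk up the directory tree: a stack.yaml marks a stack, a project.yaml marks a project.
+  while [ "$dir" != "." ] && [ "$dir" != "/" ]; do
+    if [ -f "$dir/stack.yaml" ]; then echo "changed stack: $dir"; fi
+    if [ -f "$dir/project.yaml" ]; then echo "changed project: $dir"; fi
+    dir=$(dirname "$dir")
+  done
+done | sort -u
+```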
+
+The [example](https://github.com/KusionStack/konfig/actions/runs/6325320773/job/17176584497) changes the file `example/multi-stack/base/base.k`, where the affected project is `example/multi-stack`, and the affected stacks are `example/multi-stack/dev` and `example/multi-stack/prod`. The result, shown below, meets our expectation.
+
+![changed-project-stack](/img/docs/user_docs/guides/github-actions/changed-project-stack.png)
+
+## Check Project and Stack Structure
+
+The job [check-structure](https://github.com/KusionStack/konfig/blob/main/.github/workflows/main.yml#L39) verifies the structural validity of the changed projects and stacks, so that the Kusion CLI tools can be used correctly. The check items are as follows:
+
+- The field `name` is required in project.yaml;
+- The field `name` is required in stack.yaml.
+
+If the structure check succeeds, the structure is correct. A [pytest](https://docs.pytest.org/en/7.3.x/) report `check-structure-report` is also generated, and you can get it from [GitHub Actions Artifacts](https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts).
+
+The [example](https://github.com/KusionStack/konfig/actions/runs/6325320773/job/17176592318) passes the directory structure verification. It is clear from the report that the changed project and stacks have been checked, and the result is passed.
+
+![check-structure](/img/docs/user_docs/guides/github-actions/check-structure.png)
+
+## Test Code Correctness
+
+Besides a valid structure, the code must have correct syntax and semantics, which the job [test-correctness](https://github.com/KusionStack/konfig/blob/main/.github/workflows/main.yml#L65) ensures. `kusion compile` gets executed on the changed stacks. If it succeeds, there are no syntax errors; otherwise, the configuration code is invalid and the subsequent application deployment will fail.
+
+A report named `test-correctness-report` also gets generated.
+
+The [example](https://github.com/KusionStack/konfig/actions/runs/6325320773/job/17176592034) passes the code correctness test. The report shows that the tested stacks are `example/multi-stack/dev` and `example/multi-stack/prod`, and the result is passed.
+
+![test-correctness](/img/docs/user_docs/guides/github-actions/test-correctness.png)
+
+## Preview Changed Stack
+
+After the above jobs pass, the safety of the configuration change is guaranteed, and it's time to deploy your application. Before applying the change to the real infrastructure, it's necessary to know the expected result of the application deployment. The job [preview](https://github.com/KusionStack/konfig/blob/main/.github/workflows/main.yml#L90) calls `kusion preview` to get the expected change result, and the result is uploaded to the artifact `preview-report`. If the result meets your requirements, you can go to the next job and deploy the application.
+
+The [example](https://github.com/KusionStack/konfig/actions/runs/6325320773/job/17176612053) changes the stacks `example/multi-stack/dev` and `example/multi-stack/prod`. The following picture shows the preview result of `example/multi-stack/prod`: a Kubernetes Namespace, Service and Deployment will be created if `kusion apply` is called.
+
+![preview](/img/docs/user_docs/guides/github-actions/preview.png)
+
+
+## Apply Changed Stack
+Finally, the last step arrives, i.e. deploying the application.
The job [apply](https://github.com/KusionStack/konfig/blob/main/.github/workflows/main.yml#L121) calls `kusion apply` to apply the configuration change to the real infrastructure. If the job succeeds, the result will be uploaded to the artifact `apply-report`.
+
+For the stack `example/multi-stack/prod` in the [example](https://github.com/KusionStack/konfig/actions/runs/6325320773/job/17176645252), a Kubernetes Namespace, Service and Deployment are created, which is consistent with the preview result.
+
+![apply](/img/docs/user_docs/guides/github-actions/apply.png)
+
+## Summary
+This tutorial demonstrates how Kusion integrates with GitHub Actions to deploy an application. Through structure checking, correctness testing, preview and apply, Kusion with GitHub Actions enables you to deploy applications efficiently and securely.
diff --git a/docs/user_docs/guides/guides.md b/docs_versioned_docs/version-v0.9/guides/guides.md
similarity index 100%
rename from docs/user_docs/guides/guides.md
rename to docs_versioned_docs/version-v0.9/guides/guides.md
diff --git a/docs_versioned_docs/version-v0.9/guides/observability/_category_.json b/docs_versioned_docs/version-v0.9/guides/observability/_category_.json
new file mode 100644
index 00000000..dcde7d62
--- /dev/null
+++ b/docs_versioned_docs/version-v0.9/guides/observability/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Automated Observability",
+  "position": 2
+}
diff --git a/docs_versioned_docs/version-v0.9/guides/observability/prometheus.md b/docs_versioned_docs/version-v0.9/guides/observability/prometheus.md
new file mode 100644
index 00000000..3677036a
--- /dev/null
+++ b/docs_versioned_docs/version-v0.9/guides/observability/prometheus.md
@@ -0,0 +1,195 @@
+# Configure Monitoring Behavior With Prometheus
+
+This document provides the step-by-step instructions to set up monitoring for your application.
+
+As of today, kusion supports the configuration of Prometheus scraping behaviors for the target application. In the future, we will add more cloud-provider-native solutions, such as AWS CloudWatch, Azure Monitor, etc.
+
+The demo sample is mainly composed of the following components:
+
+- Namespace
+- Deployment
+- Service
+- ServiceMonitor
+
+:::tip
+
+This guide requires you to have a basic understanding of Kubernetes and Prometheus.
+If you are not familiar with the relevant concepts, please refer to the links below:
+
+- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/)
+- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/)
+:::
+
+## Pre-requisite
+Please refer to the [prerequisites](../working-with-k8s/deploy-application#prerequisites) in the guide for deploying an application.
+
+The example below also requires you to have [initialized the project](../working-with-k8s/deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](../working-with-k8s/deploy-application#kclmod) under the project directory.
+
+## Setting up your own Prometheus
+
+There are quite a few ways to set up Prometheus in your cluster:
+1. Installing a Prometheus operator
+2. Installing a standalone Prometheus server
+3. Installing a Prometheus agent and connecting to a remote Prometheus server
+
+[The advice from the Prometheus team](https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-401092041) is to use the `ServiceMonitor` or `PodMonitor` CRs via the Prometheus operator to manage scrape configs going forward[2].
+ +In either case, you only have to do this setup once per cluster. This doc will use a minikube cluster and Prometheus operator as an example. + +### Installing Prometheus operator[3]. +To get the example in this user guide working, all you need is a running Prometheus operator. You can have that installed by running: +``` +LATEST=$(curl -s https://api.github.com/repos/prometheus-operator/prometheus-operator/releases/latest | jq -cr .tag_name) +curl -sL https://github.com/prometheus-operator/prometheus-operator/releases/download/${LATEST}/bundle.yaml | kubectl create -f - +``` + +This will install all the necessary CRDs and the Prometheus operator itself in the default namespace. Wait a few minutes, you can confirm the operator is up by running: +``` +kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator -n default +``` + +### Make sure RBAC is properly set up +If you have RBAC enabled on the cluster, the following must be created for Prometheus to work properly: +``` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: default +``` + +### Configure Prometheus instance via the operator +Once all of the above is set up, you can then configure the Prometheus instance via the operator: +``` +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: prometheus +spec: + serviceAccountName: prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: {} + podMonitorNamespaceSelector: {} + podMonitorSelector: {} + resources: + requests: + memory: 400Mi +``` +This Prometheus instance above will be cluster-wide, picking up ALL the service monitors and pod monitors across ALL the namespaces. +You can adjust the requests and limits accordingly if you have a larger cluster. + +### Exposing the Prometheus portal (optional) +Once you have the managed Prometheus instance created via the Prometheus CR above, you should be able to see a service created called `prometheus-operated`: + +![prometheus-operated](/img/docs/user_docs/guides/prometheus/prometheus-operated.png) + +If you are also running on minikube, you can expose it onto your localhost via kubectl: +``` +kubectl port-forward svc/prometheus-operated 9099:9090 +``` + +You should then be able to see the Prometheus portal via `localhost:9099` in your browser: + +![prometheus-portal](/img/docs/user_docs/guides/prometheus/prometheus-portal.png) + +If you are running a non-local cluster, you can try to expose it via another way, through an ingress controller for example. + +## Using kusion to deploy your application with monitoring requirements + +At this point we are set up for good! Any new applications you deploy via kusion will now automatically have the monitoring-related resources created, should you declare you want it via the `monitoring` field in the `AppConfiguration` model. 
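+
+Before deploying anything, you can optionally double-check that the operator and the Prometheus instance from the previous sections are running. A minimal sanity check could look like the following, assuming you kept the default namespace and the resource names used in the manifests above:
+
+```
+# Optional sanity check: the operator pod, the Prometheus custom resource,
+# and the prometheus-operated service should all exist by now.
+kubectl get pods -l app.kubernetes.io/name=prometheus-operator -n default
+kubectl get prometheus -n default
+kubectl get svc prometheus-operated -n default
+```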
+ +The monitoring in an AppConfiguration is declared in the `monitoring` field. See the example below for a full, deployable AppConfiguration. + +Please note we are using a new image `quay.io/brancz/prometheus-example-app` since the app itself need to expose metrics for Prometheus to scrape: + +`helloworld/dev/main.k`: +``` +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.monitoring as m +import catalog.models.schema.v1.workload.network as n + +helloworld: ac.AppConfiguration { + workload: wl.Service { + containers: { + "monitoring-sample-app": c.Container { + image: "quay.io/brancz/prometheus-example-app:v0.3.0" + } + } + ports: [ + n.Port { + port: 8080 + } + ] + } + monitoring: m.Prometheus{ + interval: "30s" + timeout: "15s" + path: "/metrics" + scheme: "http" + } +} +``` + +The KCL file above represents an application with a service type workload, exposing the port 8080, and would like Prometheus to scrape the `/metrics` endpoint every 30 seconds. + +Running `kusion apply` would show that kusion will create a `Namespace`, a `Deployment`, a `Service` and a `ServiceMonitor`: +![kusion-apply-with-monitor](/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png) + +Continue applying all resources: +![kusion-apply-success](/img/docs/user_docs/guides/prometheus/kusion-apply-success.png) + +If we want to, we can verify the service monitor has been created successfully: +![service-monitor](/img/docs/user_docs/guides/prometheus/service-monitor.png) + +In a few seconds, you should be able to see in the Prometheus portal that the service we just deployed has now been discovered and monitored by Prometheus: +![prometheus-targets](/img/docs/user_docs/guides/prometheus/prometheus-targets.png) + +You can run a few simply queries for the data that Prometheus scraped from your application: +![prometheus-simple-query](/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png) + +For more info about PromQL, you can find them [here](https://prometheus.io/docs/prometheus/latest/querying/basics/)[4]. + +## References +1. Prometheus: https://prometheus.io/docs/introduction/overview/ +2. Prometheus team advise: https://github.com/prometheus-operator/prometheus-operator/issues/1547#issuecomment-446691500 +3. Prometheus operator getting started doc: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md +4. PromQL basics: https://prometheus.io/docs/prometheus/latest/querying/basics/ \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/guides/organizing-projects-stacks/_category_.json b/docs_versioned_docs/version-v0.9/guides/organizing-projects-stacks/_category_.json new file mode 100644 index 00000000..dd7dd2fd --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/organizing-projects-stacks/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Project Best Practices", + "position": 8 +} diff --git a/docs_versioned_docs/version-v0.9/guides/working-with-k8s/1-deploy-application.md b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/1-deploy-application.md new file mode 100644 index 00000000..d3e00c0e --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/1-deploy-application.md @@ -0,0 +1,217 @@ +# Deploy Application + +This guide shows you how to use Kusion CLIs to complete the deployment of an application running in Kubernetes. 
+We call the abstraction of application operation and maintenance configuration as `AppConfiguration`, and its instance as `Application`. +It is essentially an configuration model that describes an application. The complete definition can be seen [here](../../reference/model/catalog_models/doc_app_configuration). + +In production, the application generally includes minimally several k8s resources: + +- Namespace +- Deployment +- Service + +:::tip + +This guide requires you to have a basic understanding of Kubernetes. +If you are not familiar with the relevant concepts, please refer to the links below: + +- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- [Service](https://kubernetes.io/docs/concepts/services-networking/service/) +::: + +## Prerequisites + +Before we start, we need to complete the following steps: + +1、Install Kusion + +We recommend using HomeBrew(Mac), Scoop(Windows), or an installation shell script to download and install Kusion. +See [Download and Install](../../getting-started/install-kusion) for more details. + +2、Running Kubernetes cluster + +There must be a running Kubernetes cluster and a [kubectl](https://Kubernetes.io/docs/tasks/tools/#kubectl) command line tool. +If you don't have a cluster yet, you can use [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) to start one of your own. + +## Initializing + +This guide is to deploy an app using Kusion, relying on the Kusion CLI and a Kubernetes cluster. + +To initialize the application configuration: + +```bash +kusion init +``` + +The `kusion init` command will prompt you to enter required parameters, such as project name, project description, image address, etc. +You can keep pressing _Enter_ all the way to use the default values. + +The output is similar to: + +``` +✔ single-stack-sample A minimal kusion project of single stack +This command will walk you through creating a new kusion project. + +Enter a value or leave blank to accept the (default), and press . +Press ^C at any time to quit. + +Project Config: +✔ Project Name: helloworld +✔ AppName: helloworld +✔ ProjectName: helloworld +Stack Config: dev +✔ Image: gcr.io/google-samples/gb-frontend:v4 + +Created project 'helloworld' +``` + +Now, we have successfully initialized a project `helloworld` using the `single-stack-sample` template, which contains a `dev` stack. + +- `AppName` represents the name of the sample application, which is recorded in the generated `main.k` as the name of the `AppConfiguration` instance. +- `ProjectName` and `Project Name` represent the name of the sample project, which is used as the generated folder name and then recorded in the generated `project.yaml`. +- `Image` represents the image address of the application container. + +:::info + +See [Project&Stack](../../concepts/glossary) for more details about Project and Stack. +::: + +The directory structure is as follows: + +``` +helloworld + ├── README.md + ├── dev + │ ├── main.k + │ ├── kcl.mod + │ ├── kcl.mod.lock + │ └── stack.yaml + └── project.yaml + +1 directory, 6 files +``` + +The project directory has the following files that are automatically generated: +- `README.md` contains the generated README from a template. +- `project.yaml` represents project-level configurations. 
+- `dev` directory stores the customized stack configuration: + - `dev/main.k` stores configurations in the `dev` stack. + - `dev/stack.yaml` stores stack-level configurations. + - `dev/kcl.mod` stores stack-level dependencies. + - `dev/kcl.mod.lock` stores version-sensitive dependencies. + +In general, the `.k` files are the KCL source code that represents the application configuration, and the `.yaml` is the static configuration file that describes behavior at the project or stack level. + +### kcl.mod +There should be a `kcl.mod` file generated automatically under the project directory. The `kcl.mod` file describes the dependency for the current project or stack. By default, it should contain a reference to the official [`catalog` repository](https://github.com/KusionStack/catalog) which holds some common model definitions that fits best practices. You can also create your own models library and reference that. + +## Compiling + +At this point, the project has been initialized with the Kusion built-in template. +The configuration is written in KCL, not JSON/YAML which Kubernetes recognizes, so it needs to be compiled to get the final output. + +Enter stack dir `helloworld/dev` and compile: + +```bash +cd helloworld/dev && kusion compile +``` + +The output is printed to `stdout` by default. You can save it to a file using the `-o/--output` flag when running `kusion compile`. + +The output of `kusion compile` is the spec format. + +:::tip + +For instructions on the kusion command line tool, execute `kusion -h`, or refer to the tool's online [documentation](../../reference/cli/kusion). +::: + +## Applying + +Compilation is now completed. We can apply the configuration as the next step. In the output from `kusion compile`, you can see 3 resources: + +- a Namespace named `helloworld` +- a Deployment named `helloworld-dev-helloworld` in the `helloworld` namespace +- a Service named `helloworld-dev-helloworld-private` in the `helloworld` namespace + +Execute command: + +```bash +kusion apply +``` + +The output is similar to: + +``` + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:helloworld Create +* ├─ v1:Service:helloworld:helloworld-dev-helloworld-private Create +* └─ apps/v1:Deployment:helloworld:helloworld-dev-helloworld Create + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS Create v1:Namespace:helloworld success + SUCCESS Create v1:Service:helloworld:helloworld-dev-helloworld-private success + SUCCESS Create apps/v1:Deployment:helloworld:helloworld-dev-helloworld success +Create apps/v1:Deployment:helloworld:helloworld-dev-helloworld success [3/3] █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 3 created, 0 updated, 0 deleted. +``` + +After the configuration applying successfully, you can use the `kubectl` to check the actual status of these resources. + +1、 Check Namespace + +```bash +kubectl get ns +``` + +The output is similar to: + +``` +NAME STATUS AGE +default Active 117d +helloworld Active 63s +kube-system Active 117d +... 
+``` + +2、Check Deployment + +```bash +kubectl get deploy -n helloworld +``` + +The output is similar to: + +``` +NAME READY UP-TO-DATE AVAILABLE AGE +helloworld-dev-helloworld 2/2 2 2 111s +``` + +3、Check Service + +```bash +kubectl get svc -n helloworld +``` + +The output is similar to: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +helloworld-dev-helloworld-private ClusterIP 10.111.183.0 80/TCP 2m6s +``` + +4、Validate app + +Using the `kubectl` tool, forward native port `30000` to the service port `80`. + +```bash +kubectl port-forward svc/helloworld-dev-helloworld-private -n helloworld 30000:80 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000): + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) diff --git a/docs_versioned_docs/version-v0.9/guides/working-with-k8s/2-container.md b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/2-container.md new file mode 100644 index 00000000..70e67095 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/2-container.md @@ -0,0 +1,109 @@ +# Configure Containers + +You can manage container-level configurations in the `AppConfiguration` model via the `containers` field (under the `workload` schemas). By default, everything defined in the `containers` field will be treated as application containers. Sidecar containers will be supported in a future version of kusion. + +For the full `Container` schema reference, please see [here](../../reference/model/catalog_models/workload/doc_service#schema-container) for more details. + +## Pre-requisite +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](deploy-application#kclmod) under the stack directory. + +## Example +`helloworld/dev/main.k`: +```py +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.network as n + +helloworld: ac.AppConfiguration { + workload: wl.Service { + containers: { + "helloworld": c.Container { + image: "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512M" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + } + ] + } +} +``` + +## Apply + +Re-run steps in [Applying](deploy-application#applying), new container configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:helloworld UnChanged +* ├─ v1:Service:helloworld:helloworld-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:helloworld:helloworld-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... 
+ SUCCESS UnChanged v1:Namespace:helloworld, skip + SUCCESS UnChanged v1:Service:helloworld:helloworld-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:helloworld:helloworld-dev-helloworld success +Update apps/v1:Deployment:helloworld:helloworld-dev-helloworld success [3/3] █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation + +We can verify the container (in the deployment template) now has the updated attributes as defined in the container configuration: +``` +$ kubectl get deployment -n helloworld -o yaml +... + template: + ... + spec: + containers: + - env: + - name: env1 + value: VALUE + - name: env2 + value: VALUE2 + image: gcr.io/google-samples/gb-frontend:v4 + imagePullPolicy: IfNotPresent + name: helloworld + readinessProbe: + failureThreshold: 3 + httpGet: + host: localhost + path: / + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 500m + memory: 512M +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/guides/working-with-k8s/3-service.md b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/3-service.md new file mode 100644 index 00000000..1da230f2 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/3-service.md @@ -0,0 +1,104 @@ +# Expose Service + +You can determine how to expose your service in the `AppConfiguration` model via the `ports` field (under the `workload` schemas). The `ports` field defines a list of all the `Port`s you want to expose for the application (and their corresponding listening ports on the container, if they don't match the service ports), so that it can be consumed by other applications. + +Unless explicitly defined, each of the ports exposed is by default exposed privately as a `ClusterIP` type service. You can expose a port publicly by specifying the `exposeInternet` field in the `Port` schema. At the moment, the implementation for publicly access is done via Load Balancer type service backed by cloud providers. Ingress will be supported in a future version of kusion. + +For the `Port` schema reference, please see [here](../../reference/model/catalog_models/workload/doc_service#schema-port) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](deploy-application#kclmod) under the project directory. 
+ +## Example + +`helloworld/dev/main.k`: +```py +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.network as n + +helloworld: ac.AppConfiguration { + workload: wl.Service { + containers: { + "helloworld": c.Container { + image: "gcr.io/google-samples/gb-frontend:v4" + env: { + "env1": "VALUE" + "env2": "VALUE2" + } + resources: { + "cpu": "500m" + "memory": "512M" + } + # Configure an HTTP readiness probe + readinessProbe: p.Probe { + probeHandler: p.Http { + url: "http://localhost:80" + } + initialDelaySeconds: 10 + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 8080 + targetPort: 80 + } + ] + } +} +``` + +The code above changes the service port to expose from `80` in the last guide to `8080`, but still targeting the container port `80` because that's what the application is listening on. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), new service configuration can be applied. + +``` +$ kusion apply + ✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:helloworld UnChanged +* ├─ v1:Service:helloworld:helloworld-dev-helloworld-private Update +* └─ apps/v1:Deployment:helloworld:helloworld-dev-helloworld UnChanged + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:helloworld, skip + SUCCESS Update v1:Service:helloworld:helloworld-dev-helloworld-private success + SUCCESS UnChanged apps/v1:Deployment:helloworld:helloworld-dev-helloworld, skip +UnChanged apps/v1:Deployment:helloworld:helloworld-dev-helloworld, skip [3/3] ████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation +We can verify the Kubernetes service now has the updated attributes (mapping service port 8080 to container port 80) as defined in the `ports` configuration: +``` +kubectl get svc -n helloworld -o yaml +... + spec: + ... + ports: + - name: helloworld-dev-helloworld-private-8080-tcp + port: 8080 + protocol: TCP + targetPort: 80 +... +``` + +Exposing service port 8080: +``` +kubectl port-forward svc/helloworld-dev-helloworld-private -n helloworld 30000:8080 +``` + +Open browser and visit [http://127.0.0.1:30000](http://127.0.0.1:30000), the application should be up and running: + +![app-preview](/img/docs/user_docs/guides/working-with-k8s/app-preview.png) \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/guides/working-with-k8s/4-image-upgrade.md b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/4-image-upgrade.md new file mode 100644 index 00000000..1ae7b621 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/4-image-upgrade.md @@ -0,0 +1,68 @@ +# Upgrade Image + +You can declare the application's container image via `image` field of the `Container` schema. + +For the full `Container` schema reference, please see [here](../../reference/model/catalog_models/workload/doc_service#schema-container) for more details. + +## Pre-requisite +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. 
+ +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](deploy-application#kclmod) under the project directory. + +## Example + +Update the image value in `dev/main.k`: +```py +import catalog.models.schema.v1 as ac + +helloworld: ac.AppConfiguration { + workload.containers.nginx: { + # dev stack has different image + # set image to your want + # before: + # image = "gcr.io/google-samples/gb-frontend:v4" + # after: + image = "gcr.io/google-samples/gb-frontend:v5" + } +} +``` + +Everything else in `main.k` stay the same. + +## Applying + +Re-run steps in [Applying](deploy-application#applying), update image is completed. + +``` +$ kusion apply +✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:helloworld UnChanged +* ├─ v1:Service:helloworld:helloworld-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:helloworld:helloworld-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:helloworld, skip + SUCCESS UnChanged v1:Service:helloworld:helloworld-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:helloworld:helloworld-dev-helloworld success +Update apps/v1:Deployment:helloworld:helloworld-dev-helloworld success [3/3] █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation +We can verify the application container (in the deployment template) now has the updated image (v5) as defined in the container configuration: +``` +kubectl get deployment -n helloworld -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v5 + ... +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/guides/working-with-k8s/5-resource-spec.md b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/5-resource-spec.md new file mode 100644 index 00000000..4750f65e --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/5-resource-spec.md @@ -0,0 +1,78 @@ +# Configure Resource Specification + +You can manage container-level resource specification in the `AppConfiguration` model via the `resources` field (under the `Container` schema). + +For the full `Container` schema reference, please see [here](../../reference/model/catalog_models/workload/doc_service#schema-container) for more details. + +## Prerequisites + +Please refer to the [prerequisites](deploy-application#prerequisites) in the guide for deploying an application. + +The example below also requires you to have [initialized the project](deploy-application#initializing) using the `kusion init` command, which will generate a [`kcl.mod` file](deploy-application#kclmod) under the project directory. + +## Example +Update the resources value in `dev/main.k`: +```py +import catalog.models.schema.v1 as ac + +helloworld: ac.AppConfiguration { + workload.containers.helloworld: { + # dev stack has different resource requirements + # set resources to your want + # before: + # resources: { + # "cpu": "500m" + # "memory": "512M" + # } + # after: + resources: { + "cpu": "250m" + "memory": "256Mi" + } + } +} +``` + +Everything else in `main.k` stay the same. 
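+
+If you would like to double-check the planned change before applying it, you could optionally run `kusion preview` first (see the `kusion preview` command in the CLI reference):
+
+```shell
+# Optional: preview the planned resource changes before applying.
+# Run this under the stack directory, e.g. helloworld/dev.
+kusion preview
+```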
+ +## Applying + +Re-run steps in [Applying](deploy-application#applying), resource scaling is completed. + +``` +$ kusion apply +✔︎ Generating Spec in the Stack dev... +Stack: dev ID Action +* ├─ v1:Namespace:helloworld UnChanged +* ├─ v1:Service:helloworld:helloworld-dev-helloworld-private UnChanged +* └─ apps/v1:Deployment:helloworld:helloworld-dev-helloworld Update + + +? Do you want to apply these diffs? yes +Start applying diffs ... + SUCCESS UnChanged v1:Namespace:helloworld, skip + SUCCESS UnChanged v1:Service:helloworld:helloworld-dev-helloworld-private, skip + SUCCESS Update apps/v1:Deployment:helloworld:helloworld-dev-helloworld success +Update apps/v1:Deployment:helloworld:helloworld-dev-helloworld success [3/3] █████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ 100% | 0s +Apply complete! Resources: 0 created, 1 updated, 0 deleted. +``` + +## Validation +We can verify the application container (in the deployment template) now has the updated resources attributes (cpu:250m, memory:256Mi) as defined in the container configuration: +``` +kubectl get deployment -n helloworld -o yaml +... + template: + ... + spec: + containers: + - env: + ... + image: gcr.io/google-samples/gb-frontend:v4 + ... + resources: + limits: + cpu: 250m + memory: 256Mi +... +``` \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/guides/working-with-k8s/_category_.json b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/_category_.json new file mode 100644 index 00000000..c0008286 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Kubernetes", + "position": 1 +} diff --git a/docs/user_docs/guides/working-with-k8s/index.md b/docs_versioned_docs/version-v0.9/guides/working-with-k8s/index.md similarity index 100% rename from docs/user_docs/guides/working-with-k8s/index.md rename to docs_versioned_docs/version-v0.9/guides/working-with-k8s/index.md diff --git a/docs_versioned_docs/version-v0.9/intro/_category_.json b/docs_versioned_docs/version-v0.9/intro/_category_.json new file mode 100644 index 00000000..63c6703d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/intro/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "What is Kusion?", + "position": 0 +} diff --git a/docs_versioned_docs/version-v0.9/intro/kusion-vs-x.md b/docs_versioned_docs/version-v0.9/intro/kusion-vs-x.md new file mode 100644 index 00000000..dfd012cf --- /dev/null +++ b/docs_versioned_docs/version-v0.9/intro/kusion-vs-x.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 2 +--- + +# Kusion vs. Other Software + +It can be difficult to understand how different software compare to each other. Is one a replacement for the other? Are they complementary? etc. In this section, we compare Kusion to other software. + +**vs. GitOps (ArgoCD, FluxCD, etc.)** + +According to the [open GitOps principles](https://opengitops.dev/), GitOps systems typically have its desired state expressed declaratively, continuously observe actual system state and attempt to apply the desired state. In the design of Kusion toolchain, we refer to those principles but have no intention to reinvent any GitOps systems wheel. + +Kusion adopts your GitOps process and improves it with richness of features. 
The declarative [AppConfiguration](../concepts/appconfiguration) model can be used to express the desired intent; once the intent is declared, the [Kusion CLI](../reference/cli/kusion) takes on the role of making production match that intent as safely as possible.
+
+**vs. PaaS (Heroku, Vercel, etc.)**
+
+Kusion shares the same goal as traditional PaaS platforms: providing application delivery and management capabilities. The intuitive difference from full-functionality PaaS platforms is that Kusion is a client-side toolchain, not a complete PaaS platform.
+
+Also, traditional PaaS platforms typically constrain the types of applications they can run, but there is no such constraint for Kusion, which means Kusion provides greater flexibility.
+
+Kusion allows you to have platform-like features without the constraints of a traditional PaaS. However, Kusion is not attempting to replace any PaaS platform; instead, Kusion can be used to deploy to a platform such as Heroku.
+
+**vs. KubeVela**
+
+KubeVela is a modern software delivery and management control plane. KubeVela makes it easier to deploy and operate applications on top of Kubernetes.
+
+Kusion is not a control plane. Kusion is a client-side tool for describing application intent in a declarative way and providing a consistent workflow to apply that desired state.
+
+With a proper Generator implementation, the target Spec of [AppConfiguration](../concepts/appconfiguration) can be a [KubeVela Application](https://kubevela.io/docs/getting-started/core-concept/), and Kusion can use KubeVela to satisfy the "apply" step.
+
+**vs. Helm**
+
+The concept of Helm originates from the [package management](https://en.wikipedia.org/wiki/Package_manager) mechanism of the operating system. It is a package management tool based on templated YAML files and supports the execution and management of resources in the package.
+
+Kusion is not a package manager. Kusion naturally provides a superset of Helm capabilities with the modeled key-value pairs, so that developers can use Kusion directly as a programmable alternative to avoid the pain of writing text templates. For users who have adopted Helm, the stack compilation results in Kusion can be packaged and used in Helm format.
+
+**vs. Kubernetes**
+
+Kubernetes (K8s) is a container scheduling and management runtime widely used around the world, an "operating system" core for containers, and a platform for building platforms. Above the Kubernetes API layer, Kusion aims to provide an app-centric **abstraction** and unified **workspace**, a better **user experience** and automated **workflow**, and to help developers build the app delivery model easily and collaboratively.
diff --git a/docs_versioned_docs/version-v0.9/intro/overview.md b/docs_versioned_docs/version-v0.9/intro/overview.md
new file mode 100644
index 00000000..0fe39fd7
--- /dev/null
+++ b/docs_versioned_docs/version-v0.9/intro/overview.md
@@ -0,0 +1,52 @@
+---
+sidebar_position: 1
+sidebar_label: Overview
+title: Overview
+slug: /
+---
+
+# Introduction to Kusion
+
+Welcome to Kusion! This introduction section covers what Kusion is, the problem Kusion aims to solve, and how Kusion compares to other software. If you just want to dive into using Kusion, feel free to skip ahead to the [Getting Started](getting-started/install-kusion) section.
+
+## What is Kusion?
+
+Kusion is a modern application delivery and management toolchain that enables developers to specify desired intent in a declarative way and then use a consistent workflow to drive continuous deployment through the application lifecycle. Inspired by the phrase **Fusion on Kubernetes**, Kusion aims to help application and platform developers develop and deliver in a self-serviceable, fast, reliable, and collaborative way.
+
+
+![](/img/docs/user_docs/intro/kusion.png)
+
+
+## Why Kusion?
+
+Developers should be able to deploy and run their applications and services end to end. **"You build it, you run it", the original promise of DevOps.**
+
+But in most software organizations today, this promise quickly becomes unrealistic because of increasingly complex cloud-native toolchains. While cloud-native technologies have made huge improvements in areas such as scalability, availability and operability, they also bring a downside - a growing burden on developers, which has led to the rise of [Platform Engineering](https://platformengineering.org/).
+
+Another challenge we saw is that a series of [antipatterns](https://web.devopstopologies.com/#anti-types) emerge when regular software organizations try to implement true DevOps. Without a well-proven reference architecture and supporting tools, it's much more difficult to accomplish the original promise.
+
+On one hand, **Kusion was built to minimize the developer's cognitive load**. With an application-centric configuration model, you don't need to deal with tedious infrastructure and configuration management tooling; all you need to be familiar with is [AppConfiguration](config-walkthrough/overview). This approach shields developers from the configurational complexity of Kubernetes but still enables standardization by design.
+
+On the other hand, **Kusion defines a new way for different engineering teams to collaborate**. With the separation of concerns, different roles can focus on their work based on their knowledge and responsibility. Through such a division of labor, the platform team can better manage the differences and complexities of the platform, and app developers can participate in ops work with less cognitive load.
+
+## Kusion Highlights
+
+* **Platform as Code**
+
+  Specify the desired application intent through declarative configuration code, and drive continuous deployment with any CI/CD system or GitOps to match that intent. No ad-hoc scripts, no hard-to-maintain custom workflows, just declarative configuration code.
+
+* **Dynamic Configuration Management**
+
+  Enable platform teams to set baseline templates and control how and where to deploy application workloads and provision accessory resources, while still giving application developers freedom via workload-centric specification and deployment.
+
+* **Security & Compliance Built In**
+
+  Enforce security and infrastructure best practices with out-of-the-box [base models](https://github.com/KusionStack/catalog), and create security and compliance guardrails for any Kusion deployment with third-party Policy as Code tools. All accessory resource secrets are automatically injected into Workloads.
+
+* **Lightweight and Open Model Ecosystem**
+
+  The pure client-side solution ensures good portability, and the rich APIs make it easier to integrate and automate. A large and growing model ecosystem covers all stages of the application lifecycle, with extensive connections to various infrastructure capabilities.
+ +:::tip + +**Kusion is an early project.** The end goal of Kusion is to boost [Internal Developer Platform](https://internaldeveloperplatform.org/) revolution and we are iterating on Kusion quickly to strive towards this goal. For any help or feedback, please contract us in [Slack](https://github.com/KusionStack/community/discussions/categories/meeting) or [issues](https://github.com/KusionStack/kusion/issues). diff --git a/docs_versioned_docs/version-v0.9/reference/_category_.json b/docs_versioned_docs/version-v0.9/reference/_category_.json new file mode 100644 index 00000000..3ce2f2b7 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Reference", + "position": 6 +} diff --git a/docs/reference/cli/_category_.json b/docs_versioned_docs/version-v0.9/reference/cli/_category_.json similarity index 100% rename from docs/reference/cli/_category_.json rename to docs_versioned_docs/version-v0.9/reference/cli/_category_.json diff --git a/docs_versioned_docs/version-v0.9/reference/cli/backend/_category_.json b/docs_versioned_docs/version-v0.9/reference/cli/backend/_category_.json new file mode 100644 index 00000000..29176598 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/backend/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Backend Configuration", + "position": 2 +} diff --git a/docs_versioned_docs/version-v0.9/reference/cli/backend/backend-configuration.md b/docs_versioned_docs/version-v0.9/reference/cli/backend/backend-configuration.md new file mode 100644 index 00000000..78966e91 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/backend/backend-configuration.md @@ -0,0 +1,175 @@ +# Backend Configuration + +The backend configuration defines the place where Kusion stores its `state` data file. By default, Kusion uses the `local` type of backend to store the state on the local disk, while for team collaboration projects, the state can be stored on a `remote` type of backend, such as `database`, `oss` and `s3` to allow multiple users access it. + +## Configuring State Backend + +Kusion configures the storage of state through command line parameters or the `backend` field in the `project.yaml` file. + +### Command Line Parameters + +Users can specify the type of backend with the option `--backend-type`, and configure the detailed information with `--backend-config` or `-C`, for instance: + +```shell +kusion apply --backend-type local --backend-config path=kusion_state.json +``` + +```shell +kusion destroy --backend-type local --backend-config path=kusion_state.json +``` + +### Configuration File + +Users can configure the storage of the state with the `backend` field in the `project.yaml` file: + +```yaml +# project.yaml +backend: + storageType: local + config: + path: kusion_state.json +``` + +In this case, `storageType` is used to declare the type of storage for the state backend, and `config` is used to declare the required parameters for the corresponding storage type. + +### Configuration Combination + +When both of the `config` field in the `project.yaml` and the `--backend-config` option in the command line are configured, Kusion will merge the entire configuration, combining both the `project.yaml` file and the command line options. When there comes a conflict between the options in the `project.yaml` file and the command line, the options in the **command line** will take precedence. 
This way, users can pass the sensitive information like `accessKeyID` and `accessKeySecret` to Kusion through command line parameters. + +## Available Backend + +- local +- oss +- s3 +- db + +### Default Backend + +When neither the `project.yaml` file nor the command line parameters declare the backend configuration, Kusion by default uses the [local](#local). + +### local + +The `local` storage type stores the `state` on the local file system, which is suitable for local operations while not ideal for multi-user collaboration. + +Here is an example: + +```yaml +# project.yaml +backend: + storageType: local + config: + path: kusion_state.json +``` + +* storageType - local, using local file system to store the state +* path - `optional` specify the local file path to store the state + +### oss + +The `oss` storage type stores the `state` on the **Alicloud Object Storage Service (OSS)**. + +Here is an example: + +```yaml +# project.yaml +backend: + storageType: oss + config: + endpoint: oss-cn-beijing.aliyuncs.com + bucket: kusion-oss +``` +```shell +kusion apply -C accessKeyID=******* -C accessKeySecret=******* +``` +```shell +kusion destroy -C accessKeyID=******* -C accessKeySecret=******* +``` + +* storageType - oss, using alicloud oss as the storage backend for state +* endpoint - `required` specify the access endpoint for alicloud oss bucket +* bucket - `required` specify the name of the alicloud oss bucket +* accessKeyID - `required` specify the alicloud account accessKeyID +* accessKeySecret - `required` specify the alicloud account accessKeySecret + +### s3 + +The `s3` storage type stores the `state` on the **AWS Simple Storage Service (S3)**. + +Here is an example: + +```yaml +# project.yaml +backend: + storageType: s3 + config: + endpoint: s3.us-east-1.amazonaws.com + bucket: kusion-s3 + region: us-east-1 +``` +```shell +kusion apply -C accessKeyID=******* -C accessKeySecret=******* +``` +```shell +kusion destroy -C accessKeyID=******* -C accessKeySecret=******* +``` + +* storageType - s3, using aws s3 as the storage backend for state +* endpoint - `required` specify the access endpoint for aws s3 bucket +* bucket - `required` specify the name of the aws s3 bucket +* accessKeyID - `required` specify the aws account accessKeyID +* accessKeySecret - `required` specify the aws account accessKeySecret + +### db + +The `db` storage type stores the `state` into a **database**. + +Here is an example: + +```yaml +# project.yaml +backend: + storageType: db + config: + dbHost: 127.0.0.1 + dbName: kusion-db + dbPort: 3306 +``` +```shell +kusion apply -C dbUser=******* -C dbPassword=******* +``` + +* storageType - db, using database as the storage backend for state +* dbHost - `required` the access address for the database +* dbName - `required` the name of the database +* dbPort - `required` the port of the database +* dbUser - `required` the user name of the database +* dbPassword - `required` the password of the database + +Note that the table name in the database used by Kusion is **state**. 
Below is an example SQL statement for creating this table: + +```sql +CREATE TABLE `state` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key', + `tenant` varchar(100) DEFAULT NULL COMMENT 'tenant', + `project` varchar(100) NOT NULL COMMENT 'project', + `kusion_version` varchar(50) DEFAULT NULL COMMENT 'kusion version', + `version` int(10) unsigned NOT NULL COMMENT 'current state format version,may upgrade in the future', + `serial` bigint(20) unsigned NOT NULL DEFAULT '0' COMMENT 'modification times for state,can be used in concurrent control', + `operator` varchar(100) DEFAULT NULL COMMENT 'last modifier', + `resources` longtext DEFAULT NULL COMMENT 'state of the resources,json array', + `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time', + `modified_time` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time', + `stack` varchar(100) DEFAULT NULL COMMENT 'stack', + `cluster` varchar(100) DEFAULT NULL COMMENT 'logical isolation in a stack,usually clustername__cellname', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_state_latest` (`tenant`, `project`, `stack`, `serial`, `cluster`), + KEY `idx_tenant` (`tenant`), + KEY `idx_project` (`project`), + KEY `idx_kusion_version` (`kusion_version`), + KEY `idx_version` (`version`), + KEY `idx_create_time` (`create_time`), + KEY `idx_modified_time` (`modified_time`), + KEY `idx_stack` (`stack`), + KEY `idx_cluster` (`cluster`) +); +``` diff --git a/docs_versioned_docs/version-v0.9/reference/cli/index.md b/docs_versioned_docs/version-v0.9/reference/cli/index.md new file mode 100644 index 00000000..b4f861dd --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/index.md @@ -0,0 +1 @@ +# Command Line Tools diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/_category_.json b/docs_versioned_docs/version-v0.9/reference/cli/kusion/_category_.json new file mode 100644 index 00000000..569a9b2b --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Kusion Commands", + "position": 1 +} diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/index.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/index.md new file mode 100644 index 00000000..66f5e9d4 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/index.md @@ -0,0 +1,28 @@ +# Kusion Commands + +Kusion is the platform engineering engine of KusionStack + +## Synopsis + +Kusion is the platform engineering engine of KusionStack. It delivers intentions to Kubernetes, Clouds, and On-Premise resources. 
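+ +As a quick orientation, a typical sequence of these commands on a project might look like the following sketch, where the working directory path is a placeholder: + +```shell +# scaffold a new project from the built-in templates +kusion init + +# preview the resource changes of a stack +kusion preview -w /path/to/workdir + +# apply the changes after reviewing the plan +kusion apply -w /path/to/workdir +```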
+ +``` +kusion [flags] +``` + +## Options + +``` + -h, --help help for kusion +``` + +## SEE ALSO + +* [kusion apply](kusion_apply.md) - Apply the operation intents of various resources to multiple runtimes +* [kusion compile](kusion_compile.md) - Compile KCL into YAML +* [kusion destroy](kusion_destroy.md) - Delete the specified resources in runtime +* [kusion init](kusion_init.md) - Initialize the scaffolding for a project +* [kusion preview](kusion_preview.md) - Preview a series of resource changes within the stack +* [kusion version](kusion_version.md) - Print the Kusion version information for the current context + +###### Auto generated by spf13/cobra on 15-Jul-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_apply.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_apply.md new file mode 100644 index 00000000..d89ff77a --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_apply.md @@ -0,0 +1,65 @@ +# kusion apply + +Apply the operation intents of various resources to multiple runtimes + +### Synopsis + +Apply a series of resource changes within the stack. + + Create or update or delete resources according to the KCL files within a stack. By default, Kusion will generate an execution plan and present it for your approval before taking any action. + + You can check the plan details and then decide if the actions should be taken or aborted. + +``` +kusion apply [flags] +``` + +### Examples + +``` + # Apply with specifying work directory + kusion apply -w /path/to/workdir + + # Apply with specifying arguments + kusion apply -D name=test -D age=18 + + # Apply with specifying setting file + kusion apply -Y settings.yaml + + # Apply with specifying spec file + kusion apply --spec-file spec.yaml + + # Skip interactive approval of plan details before applying + kusion apply --yes + + # Apply without output style and color + kusion apply --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all plan details, combined use with flag --detail + -D, --argument stringToString Specify the top-level argument (default []) + -C, --backend-config strings backend-config config state storage backend + --backend-type string backend-type specify state storage backend + -d, --detail Automatically show plan details with interactive options + --dry-run Preview the execution effect (always successful) without actually applying the changes + -h, --help help for apply + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + --operator string Specify the operator + -o, --output string Specify the output format + -O, --overrides strings Specify the configuration override path and value + -Y, --setting strings Specify the command line setting files + --spec-file string Specify the spec file path as input, and the spec file must be located in the working directory or its subdirectories + --watch After creating/updating/deleting the requested object, watch for changes + -w, --workdir string Specify the work directory + -y, --yes Automatically approve and perform the update after previewing it +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the platform engineering engine of KusionStack + +###### Auto generated by spf13/cobra on 28-Sep-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_build.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_build.md new file mode 100644 index 
00000000..2aac972b --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_build.md @@ -0,0 +1,45 @@ +# kusion build + +Build Kusion modules in a Stack to the Intent. + +### Synopsis + +Build Kusion modules in a Stack to the Intent. + + The command must be executed in a Stack or by specifying a Stack directory with the -w flag. You can provide a list of arguments to replace the placeholders defined in KCL, and use the --output flag to output the built results to a file. + +``` +kusion build [flags] +``` + +### Examples + +``` + # Build main.k with arguments + kusion build -D name=test -D age=18 + + # Build main.k with work directory + kusion build -w appops/demo/dev + + # Build main.k and write result into output.yaml + kusion build -o output.yaml + + # Build without output style and color + kusion build --no-style=true +``` + +### Options + +``` + -D, --argument stringToString Specify the top-level argument (default []) + -h, --help help for build + --no-style Disable the output style and color + -o, --output string Specify the output file + -w, --workdir string Specify the work directory +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the platform engineering engine of KusionStack + +###### Auto generated by spf13/cobra on 30-Nov-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_compile.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_compile.md new file mode 100644 index 00000000..f4725c6d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_compile.md @@ -0,0 +1,25 @@ +# kusion compile + +Deprecated: Use 'kusion build' to generate the Intent instead + +``` +kusion compile [flags] +``` + +### Examples + +``` + Deprecated +``` + +### Options + +``` + -h, --help help for compile +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the platform engineering engine of KusionStack + +###### Auto generated by spf13/cobra on 30-Nov-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_destroy.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_destroy.md new file mode 100644 index 00000000..1fd0d7a4 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_destroy.md @@ -0,0 +1,43 @@ +# kusion destroy + +Delete the specified resources in runtime + +### Synopsis + +Delete resources by resource spec. + + Only KCL files are accepted. Only one type of arguments may be specified: filenames, resources and names, or resources and label selector. + + Note that the destroy command does NOT do resource version checks, so if someone submits an update to a resource right when you submit a destroy, their update will be lost along with the rest of the resource. 
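+ +Given that caveat, it can be safer to scope the command to a specific working directory and review the plan details before confirming; a rough sketch (the path is a placeholder): + +```shell +# destroy the resources of one stack, showing plan details before confirming +kusion destroy -w /path/to/workdir -d +```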
+ +``` +kusion destroy [flags] +``` + +### Examples + +``` + # Delete the configuration of current stack + kusion destroy +``` + +### Options + +``` + -D, --argument stringToString Specify the top-level argument (default []) + -C, --backend-config strings backend-config config state storage backend + --backend-type string backend-type specify state storage backend + -d, --detail Automatically show plan details after previewing it + -h, --help help for destroy + --operator string Specify the operator + -O, --overrides strings Specify the configuration override path and value + -Y, --setting strings Specify the command line setting files + -w, --workdir string Specify the work directory + -y, --yes Automatically approve and perform the update after previewing it +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the platform engineering engine of KusionStack + +###### Auto generated by spf13/cobra on 28-Sep-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_init.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_init.md new file mode 100644 index 00000000..a6c56946 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_init.md @@ -0,0 +1,47 @@ +# kusion init + +Initialize the scaffolding for a project + +### Synopsis + +This command initializes the scaffolding for a project, generating a project from an appointed template with correct structure. + + The scaffold templates can be retrieved from local or online. The built-in templates are used by default, self-defined templates are also supported by assigning the template repository path. + +``` +kusion init +``` + +### Examples + +``` + # Initialize a project from internal templates + kusion init + + # Initialize a project from default online templates + kusion init --online=true + + # Initialize a project from a specific online template + kusion init https://github.com// --online=true + + # Initialize a project from a specific local template + kusion init /path/to/templates +``` + +### Options + +``` + --custom-params string Custom params in JSON. If specified, it will be used as the template default value and skip prompts + --force Force generating the scaffolding files, even if it would change the existing files + -h, --help help for init + --online Use templates from online repository to initialize project, or use locally cached templates + --project-name string Initialize with specified project name. If not specified, a prompt will request it + --template-name string Initialize with specified template. If not specified, a prompt will request it + --yes Skip prompts and proceed with default values +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the platform engineering engine of KusionStack + +###### Auto generated by spf13/cobra on 28-Sep-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_preview.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_preview.md new file mode 100644 index 00000000..6907d9fc --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_preview.md @@ -0,0 +1,63 @@ +# kusion preview + +Preview a series of resource changes within the stack + +### Synopsis + +Preview a series of resource changes within the stack. + + Create or update or delete resources according to the KCL files within a stack. By default, Kusion will generate an execution plan and present it for your approval before taking any action. 
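+ +For instance, a preview of a stack with machine-readable output might look like this sketch (the path is a placeholder): + +```shell +# preview the resource changes of a stack and emit the result as JSON +kusion preview -w /path/to/workdir -o json +```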
+ +``` +kusion preview [flags] +``` + +### Examples + +``` + # Preview with specifying work directory + kusion preview -w /path/to/workdir + + # Preview with specifying arguments + kusion preview -D name=test -D age=18 + + # Preview with specifying setting file + kusion preview -Y settings.yaml + + # Preview with specifying spec file + kusion preview --spec-file spec.yaml + + # Preview with ignored fields + kusion preview --ignore-fields="metadata.generation,metadata.managedFields + + # Preview with json format result + kusion preview -o json + + # Preview without output style and color + kusion preview --no-style=true +``` + +### Options + +``` + -a, --all --detail Automatically show all plan details, combined use with flag --detail + -D, --argument stringToString Specify the top-level argument (default []) + -C, --backend-config strings backend-config config state storage backend + --backend-type string backend-type specify state storage backend + -d, --detail Automatically show plan details with interactive options + -h, --help help for preview + --ignore-fields strings Ignore differences of target fields + --no-style no-style sets to RawOutput mode and disables all of styling + --operator string Specify the operator + -o, --output string Specify the output format + -O, --overrides strings Specify the configuration override path and value + -Y, --setting strings Specify the command line setting files + --spec-file string Specify the spec file path as input, and the spec file must be located in the working directory or its subdirectories + -w, --workdir string Specify the work directory +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the platform engineering engine of KusionStack + +###### Auto generated by spf13/cobra on 28-Sep-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_version.md b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_version.md new file mode 100644 index 00000000..3aff6abc --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/cli/kusion/kusion_version.md @@ -0,0 +1,31 @@ +# kusion version + +Print the Kusion version information for the current context + +### Synopsis + +Print the Kusion version information for the current context + +``` +kusion version [flags] +``` + +### Examples + +``` + # Print the Kusion version + kusion version +``` + +### Options + +``` + -h, --help help for version + -o, --output string Output format. Only json format is supported for now +``` + +### SEE ALSO + +* [kusion](index.md) - Kusion is the platform engineering engine of KusionStack + +###### Auto generated by spf13/cobra on 28-Sep-2023 diff --git a/docs_versioned_docs/version-v0.9/reference/model/1-overview.md b/docs_versioned_docs/version-v0.9/reference/model/1-overview.md new file mode 100644 index 00000000..fc75a878 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/1-overview.md @@ -0,0 +1,49 @@ +--- +id: overview +sidebar_label: Overview +--- +# Overview + +KusionStack presets application configuration models described by KCL, where the model is called **Kusion Model**. The GitHub repository [KusionStack/catalog](https://github.com/KusionStack/catalog) is used to store these models, which is known as **Kusion Model Library**. + +The original intention of designing Kusion Model is to enhance the efficiency and improve the experience of YAML users. 
Through the unified application model defined by code, complex configuration items are abstracted and encapsulated, repetitive and derivable configurations are omitted, and necessary validation logic is supplemented. Only the necessary attributes are exposed, so users get an out-of-the-box, easy-to-understand configuration interface, which reduces the difficulty and improves the reliability of configuration work. + +Kusion Model Library currently provides the Kusion Model `AppConfiguration`. The design of `AppConfiguration` is developer-centric, based on Ant Group's decades of practice in building and managing hyperscale IDP (Internal Developer Platform), and the best practices of the community. `AppConfiguration` describes the full lifecycle of an application. + +A simple example of using `AppConfiguration` to describe an application is as follows: + +```python +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.network as n +import catalog.models.schema.v1.accessories.database as db + +wordpress: ac.AppConfiguration { + workload: wl.Service { + containers: { + "wordpress": c.Container { + image: "wordpress:latest" + env: { + "WORDPRESS_DB_HOST": "secret://wordpress-db/hostAddress" + "WORDPRESS_DB_PASSWORD": "secret://wordpress-db/password" + } + resources: { + "cpu": "1" + "memory": "2Gi" + } + } + } + replicas: 2 + ports: [ + n.Port { + port: 80 + public: True + } + ] + } + + database: db.Database { + type: "alicloud" + engine: "MySQL" + version: "5.7" + size: 20 + instanceType: "mysql.n2.serverless.1c" + category: "serverless_basic" + } +} +``` \ No newline at end of file diff --git a/docs/reference/model/_category_.json b/docs_versioned_docs/version-v0.9/reference/model/_category_.json similarity index 100% rename from docs/reference/model/_category_.json rename to docs_versioned_docs/version-v0.9/reference/model/_category_.json diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/_category_.json b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/_category_.json new file mode 100644 index 00000000..4cfbecb3 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Catalog Models", + "position": 3 +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/database/_category_.json b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/database/_category_.json new file mode 100644 index 00000000..94bfa835 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/database/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Database", + "position": 3 +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/database/doc_database.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/database/doc_database.md new file mode 100644 index 00000000..8c830262 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/database/doc_database.md @@ -0,0 +1,36 @@ +# database + +## Schema Database + +As an important supporting accessory, Database describes the attributes<br />
to locally deploy or create a cloud provider managed database instance for
the workload. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type defines the local deployment mode or the specific cloud vendor that
provides the relational database service (rds). |str|Undefined|**required**| +|**engine**
Engine defines the database engine to use. |str|Undefined|**required**| +|**version**
Version defines the database engine version to use. |str|Undefined|**required**| +|**instanceType**
InstanceType defines the type of the database which is required when
creating an rds instance provided by the cloud vendor. |str|Undefined|optional| +|**size**
Size defines the allocated storage size of the rds instance provided by
the cloud vendor in GB. |int|10|optional| +|**category**
Category defines the edition of the rds instance provided by the cloud
vendor. |str|"Basic"|optional| +|**username**
Username defines the operation account for the database. |str|"root"|optional| +|**securityIPs**
SecurityIPs defines the list of IP addresses allowed to access the rds
instance provided by the cloud vendor. |[str]|["0.0.0.0/0"]|optional| +|**subnetID**
SubnetID defines the virtual subnet ID associated with the VPC that the rds
instance will be created in. |str|Undefined|optional| +|**privateLink**
PrivateLink defines whether the workload connects to the rds instance via the<br />
public network or the private network of the cloud vendor. |bool|True|optional| +|**extraMap**<br />
ExtraMap defines the diversified rds configuration items from different
cloud vendors. |{str: str}|Undefined|optional| +### Examples +```python +# Instantiate an aws rds with mysql 5.7. + +import catalog.models.schema.v1.accessories.database as db + +database: db.Database { + type: "aws" + engine: "mysql" + version: "5.7" + instanceType: "db.t3.micro" +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/doc_app_configuration.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/doc_app_configuration.md new file mode 100644 index 00000000..d6b96edd --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/doc_app_configuration.md @@ -0,0 +1,62 @@ +--- +id: doc_app_configuration +sidebar_label: App Configuration +sidebar_position: 1 +--- + +# app_configuration + +## Schema AppConfiguration + +AppConfiguration is a developer-centric definition that describes how to run an Application.<br />
This application model builds upon a decade of experience at AntGroup running a super large scale<br />
internal developer platform, combined with best-of-breed ideas and practices from the community. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**workload**
Workload defines how to run your application code. Currently supported workload profiles<br />
include Service and Job.|[workload.Service](workload/doc_service.md#schema-service) \| [workload.Job](workload/doc_job.md#schema-job)|Undefined|**required**| +|**opsRule**<br />
OpsRule specifies collection of rules that will be checked for Day-2 operation.|[trait.OpsRule](trait/doc_opsrule.md#schema-opsrule)|Undefined|optional| +|**database**|[database.Database](database/doc_database.md#schema-database)|Undefined|optional| +|**monitoring**|[monitoring.Prometheus](monitoring/doc_prometheus.md#schema-prometheus)|Undefined|optional| +|**labels**|{str: str}|Undefined|optional| +|**annotations**|{str: str}|Undefined|optional| +### Examples +```python +# Instantiate an App with a long-running service and its image is "nginx:v1" + +import catalog.models.schema.v1 as ac +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.accessories.database as db +import catalog.models.schema.v1.accessories.monitoring as m +import catalog.models.schema.v1.accessories.trait as t + +appConfiguration = ac.AppConfiguration { + workload: wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + type: "CollaSet" + } + opsRule: t.OpsRule { + maxUnavailable: "30%" + } + database: db.Database { + type: "aws" + engine: "mysql" + version: "5.7" + instanceType: "db.t3.micro" + } + monitoring: m.Prometheus{ + interval: "30s" + timeout: "15s" + path: "/metrics" + port: "web" + scheme: "http" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/_category_.json b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/_category_.json new file mode 100644 index 00000000..42610d97 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Internal", + "position": 6 +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/doc_container.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/doc_container.md new file mode 100644 index 00000000..b66365b0 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/doc_container.md @@ -0,0 +1,61 @@ +# container + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**image**
Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| +|**command**
Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker<br />
image's ENTRYPOINT is used if this is not provided.|[str]|Undefined|optional| +|**args**
Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker<br />
image's CMD is used if this is not provided.|[str]|Undefined|optional| +|**env**
List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|{str: str}|Undefined|optional| +|**workingDir**
The working directory of the running process defined in entrypoint.
The container runtime's default will be used if this is not specified.|str|Undefined|optional| +|**resources**<br />
Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|{str: str}|Undefined|optional| +|**files**
List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|{str: [container.FileSpec](#schema-filespec)}|Undefined|optional| +|**dirs**
Collection of volumes mounted into the container's filesystem.<br />
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|{str: str}|Undefined|optional| +|**livenessProbe**
LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|[p.Probe](probe/doc_probe#schema-probe)|Undefined|optional| +|**readinessProbe**
ReadinessProbe indicates whether an application is available to handle requests.|[p.Probe](probe/doc_probe#schema-probe)|Undefined|optional| +|**startupProbe**
StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|[p.Probe](probe/doc_probe#schema-probe)|Undefined|optional| +|**lifecycle**
Lifecycle refers to actions that the management system should take in response to container lifecycle events.|[lc.Lifecycle](lifecycle/doc_lifecycle#schema-lifecycle)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**content**
File content in plain text.|str|Undefined|optional| +|**contentFrom**
Source for the file content, reference to a secret or configmap value.|str|Undefined|optional| +|**mode**<br />
Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/lifecycle/doc_lifecycle.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/lifecycle/doc_lifecycle.md new file mode 100644 index 00000000..a40c60b4 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/lifecycle/doc_lifecycle.md @@ -0,0 +1,28 @@ +# lifecycle + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**preStop**
The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](../probe/doc_probe#schema-exec) \| [probe.Http](../probe/doc_probe#schema-http)|Undefined|optional| +|**postStart**
The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](../probe/doc_probe#schema-exec) \| [probe.Http](../probe/doc_probe#schema-http)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/probe/doc_probe.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/probe/doc_probe.md new file mode 100644 index 00000000..ad392f55 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/container/probe/doc_probe.md @@ -0,0 +1,88 @@ +# probe + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**probeHandler**
The action taken to determine the liveness or health of a container|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http) \| [probe.Tcp](#schema-tcp)|Undefined|**required**| +|**initialDelaySeconds**<br />
The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**timeoutSeconds**
The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**periodSeconds**
How often (in seconds) to perform the probe.|int|Undefined|optional| +|**successThreshold**
Minimum consecutive successes for the probe to be considered successful after having failed.|int|Undefined|optional| +|**failureThreshold**
Minimum consecutive failures for the probe to be considered failed after having succeeded.|int|Undefined|optional| +|**terminationGracePeriod**|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**command**
The command line to execute inside the container.|[str]|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to send HTTP requests.|str|Undefined|**required**| +|**headers**<br />
Collection of custom headers to set in the request|{str: str}|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to open a socket.|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/doc_common.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/doc_common.md new file mode 100644 index 00000000..f797fffa --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/doc_common.md @@ -0,0 +1,16 @@ +# common + +## Schema WorkloadBase + +WorkloadBase defines a set of attributes shared by different workload profiles, e.g. Service<br />
and Job. You can inherit this Schema to reuse these common attributes. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**containers**
Containers defines the templates of containers to be run.<br />
More info: https://kubernetes.io/docs/concepts/containers|{str: [container.Container](container/doc_container.md#schema-container)}|Undefined|**required**| +|**secrets**|{str: [secret.Secret](secret/doc_secret.md#schema-secret)}|Undefined|optional| +|**replicas**
Number of container replicas based on this configuration that should be run.|int|2|**required**| +|**labels**<br />
Labels are key/value pairs that are attached to the workload.|{str: str}|Undefined|optional| +|**annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|{str: str}|Undefined|optional| + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/network/doc_port.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/network/doc_port.md new file mode 100644 index 00000000..9da16051 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/network/doc_port.md @@ -0,0 +1,27 @@ +# port + +## Schema Port + +Port defines the exposed port of Service, which can be used to describe how the Service
gets accessed. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**port**<br />
The exposed port of the Service.|int|80|**required**| +|**targetPort**
The backend container port. If empty, set it the same as the port.|int|Undefined|optional| +|**protocol**
The protocol to access the port.|"TCP" \| "UDP"|"TCP"|**required**| +|**public**
Public defines whether the port can be accessed through Internet.|bool|False|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/secret/doc_secret.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/secret/doc_secret.md new file mode 100644 index 00000000..0d1eced0 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/internal/secret/doc_secret.md @@ -0,0 +1,27 @@ +# secret + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type of secret, used to facilitate programmatic handling of secret data.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#secret-types|"basic" \| "opaque"|opaque|**required**| +|**data**
Data contains the non-binary secret data in string form.|{str: str}|Undefined|optional| +|**immutable**
Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|bool|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/monitoring/_category_.json b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/monitoring/_category_.json new file mode 100644 index 00000000..ae501b55 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/monitoring/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Monitoring", + "position": 4 +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/monitoring/doc_prometheus.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/monitoring/doc_prometheus.md new file mode 100644 index 00000000..69c68e90 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/monitoring/doc_prometheus.md @@ -0,0 +1,29 @@ +# prometheus + +## Schema Prometheus + +Prometheus can be used to define monitoring requirements + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**interval**
The time interval at which Prometheus scrapes metrics data. Only applicable when operator mode is set to true.<br />
When operator mode is set to false, the scraping interval can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|Prometheus global scraping interval, which should be 1m if not explicitly set|optional| +|**timeout**
The timeout when Prometheus scrapes metrics data. Only applicable when operator mode is set to true.
When operator mode is set to false, the scraping timeout can only be set in the scraping job configuration, which kusion does not have permission to manage directly.|str|Prometheus global scraping timeout, which should be 10s if not explicitly set|optional| +|**path**
The path to scrape metrics from.|str|Prometheus global scraping path, which should be /metrics if not explicitly set|optional| +|**port**
The port to scrape metrics from. When using Prometheus operator, this needs to be the port NAME. Otherwise, this can be a port name or a number.|str|Container ports when scraping pod (monitorType is pod); Service port when scraping service (monitorType is service)|optional| +|**scheme**
The scheme to scrape metrics from. Possible values are http and https.|"http" \| "https"|http|optional| +### Examples +```python +import catalog.models.schema.v1.monitoring as m + +monitoring: m.Prometheus{ + interval: "30s" + timeout: "15s" + path: "/metrics" + port: "web" + scheme: "http" +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/trait/_category_.json b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/trait/_category_.json new file mode 100644 index 00000000..a9f8afa4 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/trait/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Trait", + "position": 5 +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/trait/doc_opsrule.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/trait/doc_opsrule.md new file mode 100644 index 00000000..1169b39b --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/trait/doc_opsrule.md @@ -0,0 +1,21 @@ +# opsrule + +## Schema OpsRule + +OpsRule describes operation rules for various Day-2 Operations. Once declared, these
operation rules will be checked before any Day-2 operations. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**maxUnavailable**
The maximum percentage of the total pod instances in the component that can be
simultaneously unhealthy.|int \| str|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.component.trait as t + +opsRule = t.OpsRule { + maxUnavailable: "30%" +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/_category_.json b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/_category_.json new file mode 100644 index 00000000..4e680b9f --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Workload", + "position": 2 +} \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/doc_job.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/doc_job.md new file mode 100644 index 00000000..654e3ad1 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/doc_job.md @@ -0,0 +1,241 @@ +# Job + +## Schemas +- [Job](#schema-job) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + +## Schema Job + +Job is a kind of workload profile that describes how to run your application code. This
is typically used for tasks that take from a few seconds to a few days to complete. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**containers**
Containers defines the templates of containers to be run.<br />
More info: https://kubernetes.io/docs/concepts/containers|{str: [container.Container](#schema-container)}|Undefined|**required**| +|**schedule**|str|Undefined|**required**| +|**replicas**
Number of container replicas based on this configuration that should be run.|int|2|**required**| +|**secrets**|{str: [secret.Secret](#schema-secret)}|Undefined|optional| +|**labels**<br />
Labels are key/value pairs that are attached to the workload.|{str: str}|Undefined|optional| +|**annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|{str: str}|Undefined|optional| +### Examples +```python +# Instantiate a job with the busybox image that runs every hour + +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c + +job: wl.Job { + containers: { + "busybox": c.Container{ + image: "busybox:1.28" + command: ["/bin/sh", "-c", "echo hello"] + } + } + schedule: "0 * * * *" +} +``` + +### Base Schema +[WorkloadBase](../internal/doc_common.md#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on<br />
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**image**
Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| +|**command**
Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker<br />
image's ENTRYPOINT is used if this is not provided.|[str]|Undefined|optional| +|**args**
Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker<br />
image's CMD is used if this is not provided.|[str]|Undefined|optional| +|**env**
List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|{str: str}|Undefined|optional| +|**workingDir**
The working directory of the running process defined in entrypoint.
The container runtime's default will be used if this is not specified.|str|Undefined|optional| +|**resources**<br />
Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|{str: str}|Undefined|optional| +|**files**
List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|{str: [container.FileSpec](#schema-filespec)}|Undefined|optional| +|**dirs**
Collection of volumes mounted into the container's filesystem.<br />
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|{str: str}|Undefined|optional| +|**livenessProbe**
LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**readinessProbe**
ReadinessProbe indicates whether an application is available to handle requests.|[p.Probe](#schema-probe)|Undefined|optional| +|**startupProbe**
StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**lifecycle**
Lifecycle refers to actions that the management system should take in response to container lifecycle events.|[lc.Lifecycle](#schema-lifecycle)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**content**
File content in plain text.|str|Undefined|optional| +|**contentFrom**
Source for the file content, reference to a secret or configmap value.|str|Undefined|optional| +|**mode**<br />
Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**probeHandler**
The action taken to determine the liveness or health of a container|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http) \| [probe.Tcp](#schema-tcp)|Undefined|**required**| +|**initialDelaySeconds**<br />
The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**timeoutSeconds**
The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**periodSeconds**
How often (in seconds) to perform the probe.|int|Undefined|optional| +|**successThreshold**
Minimum consecutive successes for the probe to be considered successful after having failed.|int|Undefined|optional| +|**failureThreshold**
Minimum consecutive failures for the probe to be considered failed after having succeeded.|int|Undefined|optional| +|**terminationGracePeriod**|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + path: "/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**command**
The command line to execute inside the container.|[str]|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to send HTTP requests.|str|Undefined|**required**| +|**headers**<br />
Collection of custom headers to set in the request|{str: str}|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to open a socket.|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response<br />
to container lifecycle events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**preStop**
The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +|**postStart**
The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type of secret, used to facilitate programmatic handling of secret data.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#secret-types|"basic" \| "opaque"|opaque|**required**| +|**data**
Data contains the non-binary secret data in string form.|{str: str}|Undefined|optional| +|**immutable**
Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|bool|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + + diff --git a/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/doc_service.md b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/doc_service.md new file mode 100644 index 00000000..8e1a6a99 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/catalog_models/workload/doc_service.md @@ -0,0 +1,274 @@ +# Service + +## Schemas +- [Service](#schema-service) + - [Container](#schema-container) + - [Filespec](#schema-filespec) + - [LifeCycle](#schema-lifecycle) + - [Probe](#schema-probe) + - [Exec](#schema-exec) + - [Http](#schema-http) + - [Tcp](#schema-tcp) + - [Secret](#schema-secret) + - [Port](#schema-port) + +## Schema Service + +Service is a kind of workload profile that describes how to run your application code. This
is typically used for long-running web applications that should "never" go down, and handle
short-lived latency-sensitive web requests, or events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**containers**
Containers defines the templates of containers to be run.<br />
More info: https://kubernetes.io/docs/concepts/containers|{str: [container.Container](#schema-container)}|Undefined|**required**| +|**replicas**
Number of container replicas based on this configuration that should be run.|int|2|**required**| +|**ports**<br />
The list of ports that the Service should expose.|[[network.Port](#schema-port)]|Undefined|optional| +|**secrets**|{str: [secret.Secret](#schema-secret)}|Undefined|optional| +|**labels**<br />
Labels are key/value pairs that are attached to the workload.|{str: str}|Undefined|optional| +|**annotations**
Annotations are key/value pairs that attach arbitrary non-identifying metadata to the workload.|{str: str}|Undefined|optional| +|**type**
Type represents the type of workload used by this Service. Currently, it supports several
types, including Deployment and CollaSet.|"Deployment" \| "CollaSet"|Deployment|optional| +### Examples +```python +# Instantiate a long-running service and its image is "nginx:v1" + +import catalog.models.schema.v1.workload as wl +import catalog.models.schema.v1.workload.container as c +import catalog.models.schema.v1.workload.network as n + +svc = wl.Service { + containers: { + "nginx": c.Container { + image: "nginx:v1" + } + } + ports: [ + n.Port { + port: 80 + public: True + } + n.Port { + port: 9090 + } + ] +} +``` + +### Base Schema +[WorkloadBase](../internal/doc_common.md#schema-workloadbase) + +## Schema Container + +Container describes how the Application's tasks are expected to be run. Depending on<br />
the replicas parameter 1 or more containers can be created from each template. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**image**
Image refers to the Docker image name to run for this container.
More info: https://kubernetes.io/docs/concepts/containers/images|str|Undefined|**required**| +|**command**
Entrypoint array. Not executed within a shell.
Command will overwrite the ENTRYPOINT value set in the Dockerfile, otherwise the Docker<br />
image's ENTRYPOINT is used if this is not provided.|[str]|Undefined|optional| +|**args**
Arguments to the entrypoint.
Args will overwrite the CMD value set in the Dockerfile, otherwise the Docker<br />
image's CMD is used if this is not provided.|[str]|Undefined|optional| +|**env**
List of environment variables to set in the container.
The value of the environment variable may be static text or a value from a secret.|{str: str}|Undefined|optional| +|**workingDir**
The working directory of the running process defined in entrypoint.
The container runtime's default will be used if this is not specified.|str|Undefined|optional| +|**resources**<br />
Map of resource requirements the container should run with.
The resources parameter is a dict with the key being the resource name and the value being
the resource value.|{str: str}|Undefined|optional| +|**files**
List of files to create in the container.
The files parameter is a dict with the key being the file name in the container and the value
being the target file specification.|{str: [container.FileSpec](#schema-filespec)}|Undefined|optional| +|**dirs**
Collection of volumes mounted into the container's filesystem.<br />
The dirs parameter is a dict with the key being the folder name in the container and the value
being the referenced volume.|{str: str}|Undefined|optional| +|**livenessProbe**
LivenessProbe indicates if a running process is healthy.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**readinessProbe**
ReadinessProbe indicates whether an application is available to handle requests.|[p.Probe](#schema-probe)|Undefined|optional| +|**startupProbe**
StartupProbe indicates that the container has started for the first time.
Container will be restarted if the probe fails.|[p.Probe](#schema-probe)|Undefined|optional| +|**lifecycle**
Lifecycle refers to actions that the management system should take in response to container lifecycle events.|[lc.Lifecycle](#schema-lifecycle)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +web = c.Container { + image: "nginx:latest" + command: ["/bin/sh", "-c", "echo hi"] + env: { + "name": "value" + } + resources: { + "cpu": "2" + "memory": "4Gi" + } +} +``` + +## Schema FileSpec + +FileSpec defines the target file in a Container. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**mode**
Mode bits used to set permissions on this file, must be an octal value
between 0000 and 0777 or a decimal value between 0 and 511|str|Undefined|**required**| +|**content**
File content in plain text.|str|Undefined|optional| +|**contentFrom**
Source for the file content, reference to a secret or configmap value.|str|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container as c + +tmpFile = c.FileSpec { + content: "some file contents" + mode: "0777" +} +``` + +## Schema Probe + +Probe describes a health check to be performed against a container to determine whether it is
alive or ready to receive traffic. There are three probe types: readiness, liveness, and startup. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**probeHandler**
The action taken to determine whether the container is alive or healthy.|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http) \| [probe.Tcp](#schema-tcp)|Undefined|**required**| +|**initialDelaySeconds**
The number of seconds before health checking is activated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**timeoutSeconds**
The number of seconds after which the probe times out.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle\#container-probes|int|Undefined|optional| +|**periodSeconds**
How often (in seconds) to perform the probe.|int|Undefined|optional| +|**successThreshold**
Minimum consecutive successes for the probe to be considered successful after having failed.|int|Undefined|optional| +|**failureThreshold**
Minimum consecutive failures for the probe to be considered failed after having succeeded.|int|Undefined|optional| +|**terminationGracePeriod**|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +probe = p.Probe { + probeHandler: p.Http { + url: "http://localhost:80/healthz" + } + initialDelaySeconds: 10 +} +``` + +## Schema Exec + +Exec describes a "run in container" action. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**command**
The command line to execute inside the container.|[str]|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +execProbe = p.Exec { + command: ["probe.sh"] +} +``` + +## Schema Http + +Http describes an action based on HTTP Get requests. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to send HTTP requests to.|str|Undefined|**required**| +|**headers**
Collection of custom headers to set in the request|{str: str}|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +httpProbe = p.Http { + url: "http://localhost:80" + headers: { + "X-HEADER": "VALUE" + } +} +``` + +## Schema Tcp + +Tcp describes an action based on opening a socket. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**url**
The fully qualified URL to open a socket to.|str|Undefined|**required**| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p + +tcpProbe = p.Tcp { + url: "tcp://localhost:1234" +} +``` + +## Schema Lifecycle + +Lifecycle describes actions that the management system should take in response
to container lifecycle events. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**preStop**
The action to be taken before a container is terminated due to an API request or
management event such as liveness/startup probe failure, preemption, resource contention, etc.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +|**postStart**
The action to be taken after a container is created.
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/\#container-hooks|[probe.Exec](#schema-exec) \| [probe.Http](#schema-http)|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.container.probe as p +import catalog.models.schema.v1.workload.container.lifecycle as lc + +lifecycleHook = lc.Lifecycle { + preStop: p.Exec { + command: ["preStop.sh"] + } + postStart: p.Http { + url: "http://localhost:80" + } +} +``` + +## Schema Secret + +Secret can be used to store sensitive data. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**type**
Type of secret, used to facilitate programmatic handling of secret data.
More info: https://kubernetes.io/docs/concepts/configuration/secret/\#secret-types|"basic" \| "opaque"|opaque|**required**| +|**data**
Data contains the non-binary secret data in string form.|{str: str}|Undefined|optional| +|**immutable**
Immutable, if set to true, ensures that data stored in the Secret cannot be updated.|bool|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.secret as sec + +basicAuth = sec.Secret { + type: "basic" + data: { + "username": "" + "password": "" + } +} +``` + +## Schema Port + +Port defines the exposed port of Service, which can be used to describe how the Service
gets accessed. + +### Attributes + +|Name and Description|Type|Default Value|Required| +|--------------------|----|-------------|--------| +|**port**
The exposed port of the Service.|int|80|**required**| +|**protocol**
The protocol to access the port.|"TCP" \| "UDP"|"TCP"|optional| +|**public**
Public defines whether the port can be accessed through the Internet.|bool|False|optional| +|**targetPort**
The backend container port. If empty, set it the same as the port.|int|Undefined|optional| +### Examples +```python +import catalog.models.schema.v1.workload.network as n + +port = n.Port { + port: 80 + targetPort: 8080 + protocol: "TCP" + public: True +} +``` + + diff --git a/docs/reference/model/images/kusion-model-01.png b/docs_versioned_docs/version-v0.9/reference/model/images/kusion-model-01.png similarity index 100% rename from docs/reference/model/images/kusion-model-01.png rename to docs_versioned_docs/version-v0.9/reference/model/images/kusion-model-01.png diff --git a/docs/reference/model/index.md b/docs_versioned_docs/version-v0.9/reference/model/index.md similarity index 100% rename from docs/reference/model/index.md rename to docs_versioned_docs/version-v0.9/reference/model/index.md diff --git a/docs_versioned_docs/version-v0.9/reference/model/naming-conventions.md b/docs_versioned_docs/version-v0.9/reference/model/naming-conventions.md new file mode 100644 index 00000000..63439820 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/naming-conventions.md @@ -0,0 +1,60 @@ +--- +id: naming-conventions +sidebar_label: Naming Conventions +--- +# Naming Conventions + +## Kubernetes Resources + +Kusion adheres to specific rules when generating the Kubernetes resources for users' applications. The table below lists some common Kubernetes resource naming conventions. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| Namespace | `project name` | v1:Namespace:wordpress | +| Deployment | `project name`-`stack name`-`app name` | apps/v1:Deployment:wordpress:wordpress-dev-wordpress | +| CronJob | `project name`-`stack name`-`app name` | batch/v1:CronJob:helloworld:helloworld-dev-helloworld | +| Service | `project name`-`stack name`-`app name`-`public`/`private` | v1:Service:helloworld:helloworld-dev-helloworld-public | + +## Terraform Resources + +Similarly, Kusion also adheres to specific naming conventions when generating the Terraform Resources. Some common resources are listed below. + +| Resource | Concatenation Rule | Example ID | +| -------- | ------------------ | ---------- | +| random_password | `app name`-`db` | hashicorp:random:random_password:wordpress-db | +| aws_security_group | `app name`-`db` | hashicorp:aws:aws_security_group:wordpress-db | +| aws_db_instance | `app name` | hashicorp:aws:aws_db_instance:wordpress | +| alicloud_db_instance | `app name` | aliyun:alicloud:alicloud_db_instance:wordpress | +| alicloud_db_connection | `app name` | aliyun:alicloud:alicloud_db_connection:wordpress | +| alicloud_rds_account | `app name` | aliyun:alicloud:alicloud_rds_account:wordpress | + +## Apply Options + +Before applying the project, users may need to export some environment variables to specify the Provider information for provisioning cloud resources. The relevant environment variables are listed in the table below. 
+ +| Environment Variable | Explanation | Example | +| -------------------- | ----------- | ------- | +| AWS_PROVIDER_REGION | The region where the aws provider provisions the resources | us-east-1 | +| AWS_ACCESS_KEY_ID | The access key for the aws provider to provision the resources | | +| AWS_SECRET_ACCESS_KEY | The secret key for the aws provider to provision the resources | | +| ALICLOUD_PROVIDER_REGION | The region where the alicloud provider provisions the resources | cn-beijing | +| ALICLOUD_ACCESS_KEY | The access key for the alicloud provider to provision the resources | | +| ALICLOUD_SECRET_KEY | The secret key for the alicloud provider to provision the resources | | + +## Magic Variables + +### Concept Explanation + +**Magic variables** are preconfigured variables representing fundamental metadata or environment variables automatically generated and injected into the application container by Kusion, which are typically used for accessories such as databases. + +### List of Magic Variables + +#### Sensitive Database Information + +For sensitive information such as the **host address**, **username** and **password** for the database instance, Kusion will automatically inject them into the application container for users through environment variables. The relevant environment variables are listed in the table below. + +| Name | Explanation | +| ---- | ----------- | +| KUSION_DB_HOST | Host address for accessing the database instance | +| KUSION_DB_USERNAME | Account username for accessing the database instance | +| KUSION_DB_PASSWORD | Account password for accessing the database instance | diff --git a/docs_versioned_docs/version-v0.9/reference/model/project-stack-config-items.md b/docs_versioned_docs/version-v0.9/reference/model/project-stack-config-items.md new file mode 100644 index 00000000..41a1378d --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/model/project-stack-config-items.md @@ -0,0 +1,49 @@ +--- +id: project-stack-config-items +sidebar_label: Project & Stack Config Items +--- +# Project & Stack Config Items + +In **project.yaml** and **stack.yaml**, users can add config items for their applications such as the project or stack names, generator types, Prometheus monitoring, etc. Below, we will provide the explanations for both config file. + +## project.yaml + +Here is an example of `project.yaml`. + +```yaml +# The project basic info +name: helloworld +prometheus: + operatorMode: True + monitorType: Service +``` + +The config items in `project.yaml` are explained below. + +- `name`: The name of the project. +- `prometheus`: + - `operatorMode`: Decides whether Kusion runs Prometheus in **Operator** mode. Kusion will generate a **Custom Resource** if it is **true**, while generate some annotations if it is **false**. + - `monitorType`: The type of the monitored resource, which can be one of `Service` or `Pod`. + +### Backend Configuration + +Kusion supports configuring the storage of state through the `backend` field in the `project.yaml` file. Detailed instructions can be found in [Backend Configuration](../cli/backend/backend-configuration.md) + +## stack.yaml + +Here is an example of `stack.yaml`. + +```yaml +# The stack basic info +name: dev +kubeConfig: /Users/username/.kube/config +``` + +The config items in `stack.yaml` are explained below. + +- `name`: The name of the stack, typically the environment of the project, e.g. `dev`, `pre` and `prod`. +- `kubeConfig`: The kubeconfig file path for this stack. 
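To make the path forms concrete, here is a minimal sketch of a `stack.yaml` (the relative file name below is a placeholder, not taken from the example above) showing `kubeConfig` written as an absolute path and, alternatively, as a dot-relative path:

```yaml
# The stack basic info, with kubeConfig as an absolute path
name: dev
kubeConfig: /Users/username/.kube/config

# Alternatively, a relative path starting with a dot also works:
# kubeConfig: ./kubeconfig/dev-cluster.yaml
```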
+ +:::tip +The `kubeConfig` field in the `stack.yaml` file only supports **absolute path** and **relative path** with a dot (.) or double dots (..). Expansions for tilde (~) and $HOME are not supported yet. +::: diff --git a/docs_versioned_docs/version-v0.9/reference/roadmap.md b/docs_versioned_docs/version-v0.9/reference/roadmap.md new file mode 100644 index 00000000..9afb1d35 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/reference/roadmap.md @@ -0,0 +1,15 @@ +# Roadmap + +For a finer-grained view into our roadmap and what is being worked on for a release, please refer to the [GitHub Issue Tracker](https://github.com/KusionStack/kusion/issues). + +## Resource Ecosystem +We plan to expand the range of resource types that our platform can handle. This includes not only traditional cloud IaaS resources, but also popular cloud-native products such as Prometheus, Istio and Argo. By supporting a wider variety of resources, we aim to address the heterogeneous needs of modern applications and allow users to harness the full power of cloud-native technologies. + +## App Progressive Rollout +One of the key challenges in delivering applications at scale is to balance the need for speed with the need for reliability. To help our users achieve this balance, we will introduce progressive rollout strategies, such as canary release, rolling release, and percentage release. These techniques enable users to test new features or versions on a small subset of their users or infrastructure before rolling them out to the entire system. By doing so, users can minimize the risk of downtime or errors caused by untested changes. + +## Custom Pipelines +The current workflow of KusionStack is `write`, `preview` and `apply`, but to handle more complex deployments we need to empower users to customize the deployment pipelines to fit their specific workflows and requirements. This includes the ability to define custom stages, add or remove steps, and integrate with third-party tools. With customizable pipelines, users can streamline their deployment process, automate repetitive tasks, and satisfy their own needs by themselves. + +## Runtime Plugin +We have already supported IaaS cloud resources and Kubernetes resources, but we need a more flexible mechanism to support a broader range of on-premise infrastructure. By supporting a diverse set of infrastructures, we can help users avoid vendor lock-in, optimize their resource usage, and scale their applications across different regions and geographies. diff --git a/docs_versioned_docs/version-v0.9/support/_category_.json b/docs_versioned_docs/version-v0.9/support/_category_.json new file mode 100644 index 00000000..c2c714f8 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/support/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "FAQ", + "position": 7 +} diff --git a/docs_versioned_docs/version-v0.9/support/install-error.md b/docs_versioned_docs/version-v0.9/support/install-error.md new file mode 100644 index 00000000..a0fde76a --- /dev/null +++ b/docs_versioned_docs/version-v0.9/support/install-error.md @@ -0,0 +1,39 @@ +--- +sidebar_position: 1 +--- + +# Installation + +## 1. Could not find `libintl.dylib` + +This problem occurs because some tools depend on the `Gettext` library, but macOS does not have this library by default. You can try to solve it in the following ways: + +1.
(Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. `brew install gettext` +3. Make sure `libintl.8.dylib` exists in `/usr/local/opt/gettext/lib` directory +4. If brew is installed in another directory, the library can be created by copying it to the corresponding directory + +## 2. macOS system SSL related errors + +Openssl dylib library not found or SSL module is not available problem + +1. (Skip this step for non-macOS m1) For macOS m1 operating system, make sure you have a homebrew arm64e-version installed in /opt/homebrew, otherwise install the arm version of brew with the following command + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# add to path +export PATH=/opt/homebrew/bin:$PATH +``` + +2. Install openssl (version 1.1) via brew + +``` +brew install openssl@1.1 +``` diff --git a/docs_versioned_docs/version-v0.9/support/kcl.md b/docs_versioned_docs/version-v0.9/support/kcl.md new file mode 100644 index 00000000..59af8492 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/support/kcl.md @@ -0,0 +1,3 @@ +# KCL + +Visit the [KCL website](https://kcl-lang.io/docs/user_docs/support/faq-kcl) for more documents. \ No newline at end of file diff --git a/docs_versioned_docs/version-v0.9/support/support.md b/docs_versioned_docs/version-v0.9/support/support.md new file mode 100644 index 00000000..3ae6efb5 --- /dev/null +++ b/docs_versioned_docs/version-v0.9/support/support.md @@ -0,0 +1,3 @@ +# FAQ + +KusionStack frequently asked questions. \ No newline at end of file diff --git a/docs_versioned_sidebars/version-v0.10-sidebars.json b/docs_versioned_sidebars/version-v0.10-sidebars.json new file mode 100644 index 00000000..9dc70e9f --- /dev/null +++ b/docs_versioned_sidebars/version-v0.10-sidebars.json @@ -0,0 +1,8 @@ +{ + "kusion": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/docs_versioned_sidebars/version-v0.11-sidebars.json b/docs_versioned_sidebars/version-v0.11-sidebars.json new file mode 100644 index 00000000..9dc70e9f --- /dev/null +++ b/docs_versioned_sidebars/version-v0.11-sidebars.json @@ -0,0 +1,8 @@ +{ + "kusion": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/docs_versioned_sidebars/version-v0.12-sidebars.json b/docs_versioned_sidebars/version-v0.12-sidebars.json new file mode 100644 index 00000000..9dc70e9f --- /dev/null +++ b/docs_versioned_sidebars/version-v0.12-sidebars.json @@ -0,0 +1,8 @@ +{ + "kusion": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/docs_versioned_sidebars/version-v0.13-sidebars.json b/docs_versioned_sidebars/version-v0.13-sidebars.json new file mode 100644 index 00000000..9dc70e9f --- /dev/null +++ b/docs_versioned_sidebars/version-v0.13-sidebars.json @@ -0,0 +1,8 @@ +{ + "kusion": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/docs_versioned_sidebars/version-v0.9-sidebars.json b/docs_versioned_sidebars/version-v0.9-sidebars.json new file mode 100644 index 00000000..9dc70e9f --- /dev/null +++ b/docs_versioned_sidebars/version-v0.9-sidebars.json @@ -0,0 +1,8 @@ +{ + "kusion": [ + { + "type": "autogenerated", + "dirName": "." 
+ } + ] +} diff --git a/docs_versions.json b/docs_versions.json new file mode 100644 index 00000000..3c2a6773 --- /dev/null +++ b/docs_versions.json @@ -0,0 +1,7 @@ +[ + "v0.13", + "v0.12", + "v0.11", + "v0.10", + "v0.9" +] diff --git a/docusaurus.config.js b/docusaurus.config.js index 425c2394..c22322e1 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -1,41 +1,90 @@ // @ts-check // Note: type annotations allow type checking and IDEs autocompletion -const lightCodeTheme = require('prism-react-renderer/themes/github'); -const darkCodeTheme = require('prism-react-renderer/themes/dracula'); +const lightCodeTheme = require("prism-react-renderer/themes/github"); +const darkCodeTheme = require("prism-react-renderer/themes/dracula"); +const fs = require("fs"); + +function getLastReleasedVersion(current) { + if (fs.existsSync(`./${current}_versions.json`)) { + const versions = require(`./${current}_versions.json`); + return versions[0]; + } else { + return null; + } +} + +function getNextVersionName(current) { + const expectedPrefix = "v0."; + const lastReleasedVersion = getLastReleasedVersion(current); + if (!lastReleasedVersion) { + return "v0.1"; + } + if (!lastReleasedVersion?.includes(expectedPrefix)) { + throw new Error("this code is only meant to be used during the 1.0 phase."); + } + + const version = parseInt( + lastReleasedVersion?.replace(expectedPrefix, ""), + 10, + ); + return `${expectedPrefix}${version + 1}`; +} /** @type {import('@docusaurus/types').Config} */ const config = { - title: 'Collaborate, Automate', - tagline: 'Codify modern delivery across Kubernetes and Clouds', + title: "Transform Your Internal Developer Platform with KusionStack", + tagline: + "Create a robust, secure, and enterprise-ready Internal Developer Platform on Kubernetes and cloud infrastructures.", - url: 'https://kusionstack.io', - organizationName: 'KusionStack', // Usually your GitHub org/user name. - projectName: 'kusionstack.io', // Usually your repo name. + url: "https://kusionstack.io", + organizationName: "KusionStack", // Usually your GitHub org/user name. + projectName: "kusionstack.io", // Usually your repo name. - baseUrl: '/', - onBrokenLinks: 'throw', - onBrokenMarkdownLinks: 'warn', - favicon: 'img/kcl-logo.png', + baseUrl: "/", + onBrokenLinks: "throw", + onBrokenMarkdownLinks: "warn", + favicon: "img/kusionstack-icon-square.png", i18n: { - defaultLocale: 'en', - locales: ['en', 'zh-CN'], + defaultLocale: "en", + locales: ["en", "zh"], + localeConfigs: { + en: { + label: "English", + }, + zh: { + label: "简体中文", + }, + }, }, scripts: [], presets: [ [ - '@docusaurus/preset-classic', + "@docusaurus/preset-classic", /** @type {import('@docusaurus/preset-classic').Options} */ ({ docs: { - // default version: Next - // lastVersion: 'current', + id: "docs", + path: "docs/kusion", + routeBasePath: "docs", + sidebarPath: require.resolve("./sidebars/kusion.js"), + + // Versionning related configs + lastVersion: getLastReleasedVersion("docs"), + versions: { + current: { + label: `${getNextVersionName("docs")} 🚧`, + }, + }, + // includeCurrentVersion: true, + // onlyIncludeVersions: (() => { + // return ['current', ...versions.slice(0, 2)]; + // })(), - sidebarPath: require.resolve('./sidebars.js'), // Please change this to your repo. 
- editUrl: 'https://github.com/KusionStack/kusionstack.io/blob/main', + editUrl: "https://github.com/KusionStack/kusionstack.io/blob/main", showLastUpdateAuthor: true, showLastUpdateTime: true, }, @@ -43,178 +92,258 @@ const config = { postsPerPage: 2, showReadingTime: true, // Please change this to your repo. - editUrl: 'https://github.com/KusionStack/kusionstack.io/blob/main', + editUrl: "https://github.com/KusionStack/kusionstack.io/blob/main", }, theme: { - customCss: require.resolve('./src/css/custom.css'), + customCss: require.resolve("./src/css/custom.css"), + }, + // The default Global Site Tag (gtag.js) plugin. + // It is a JavaScript tagging framework and API that allows you to send event data to + // Google Analytics, Google Ads, and Google Marketing Platform. + // + // More see: https://docusaurus.io/docs/3.0.1/api/plugins/@docusaurus/plugin-google-gtag + gtag: { + trackingID: "G-XC4Z27TLBR", + anonymizeIP: false, }, }), ], ], + plugins: [ + [ + "@docusaurus/plugin-content-docs", + { + id: "karpor", + path: "docs/karpor", + routeBasePath: "karpor", + sidebarPath: "./sidebars/karpor.js", + versions: { + current: { + label: `${getNextVersionName("karpor")} 🚧`, + }, + }, + editUrl: "https://github.com/KusionStack/kusionstack.io/edit/main", + showLastUpdateAuthor: true, + showLastUpdateTime: true, + }, + ], + [ + "@docusaurus/plugin-content-docs", + { + id: "kuperator", + path: "docs/kuperator", + routeBasePath: "kuperator", + sidebarPath: "./sidebars/kuperator.js", + versions: { + current: { + label: `${getNextVersionName("kuperator")} 🚧`, + }, + }, + }, + ], + [ + "@docusaurus/plugin-content-docs", + { + id: "ctrlmesh", + path: "docs/ctrlmesh", + routeBasePath: "ctrlmesh", + sidebarPath: "./sidebars/ctrlmesh.js", + versions: { + current: { + label: `${getNextVersionName("ctrlmesh")} 🚧`, + }, + }, + }, + ], + [ + "@docusaurus/plugin-content-docs", + { + id: "community", + path: "docs/community", + routeBasePath: "community", + sidebarPath: "./sidebars/community.js", + // ... 
other options + }, + ], + [ + require.resolve("@cmfcmf/docusaurus-search-local"), + { + language: ["en", "zh"], + } + ], + ], themeConfig: /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ ({ // hideableSidebar: true, - autoCollapseSidebarCategories: true, + docs: { + sidebar: { + autoCollapseCategories: false, + }, + }, announcementBar: { - id: 'announcementBar-1', // Increment on change + id: "announcementBar-1", // Increment on change content: `⭐️ If you like KusionStack, give it a star on Github`, }, - + algolia: { - appId: 'RE5E6BQUZV', - apiKey: 'e9703ec3fe7856ddb5a1321fd17a5425', - indexName: 'kusionstack', + appId: "RE5E6BQUZV", + apiKey: "e9703ec3fe7856ddb5a1321fd17a5425", + indexName: "kusionstack", contextualSearch: true, }, navbar: { - title: 'KusionStack', + title: "KusionStack", logo: { - alt: 'KusionStack Logo', - src: 'img/kcl-logo.png', + alt: "KusionStack Icon", + src: "img/kusionstack-icon.png", + srcDark: "img/kusionstack-icon-white.png", }, items: [ { - type: 'docSidebar', - docId: 'intro/kusion-intro', - position: 'left', - sidebarId: 'user_docs', - label: 'UserDoc', + type: "docSidebar", + position: "left", + sidebarId: "kusion", + label: "Kusion", + docsPluginId: "docs", }, { - type: 'docSidebar', - position: 'left', - sidebarId: 'reference', - label: 'Reference', + type: "docSidebar", + position: "left", + sidebarId: "karpor", + label: "Karpor", + docsPluginId: "karpor", }, { - type: 'docSidebar', - position: 'left', - sidebarId: 'develop', - label: 'DevDoc', + type: "docSidebar", + position: "left", + sidebarId: "kuperator", + label: "Kuperator", + docsPluginId: "kuperator", }, { - type: 'docSidebar', - position: 'left', - sidebarId: 'governance', - label: 'Governance', + type: "docSidebar", + position: "left", + sidebarId: "ctrlmesh", + label: "Ctrlmesh", + docsPluginId: "ctrlmesh", }, - - {to: '/blog', label: 'Blog', position: 'left'}, - { - type: 'docSidebar', - position: 'left', - sidebarId: 'events', - label: 'Events', + type: "docSidebar", + position: "left", + sidebarId: "community", + label: "Community", + docsPluginId: "community", + }, + { + to: "https://blog.kusionstack.io", + label: "Blog", + position: "left", + target: "_self", + }, + { + type: "docsVersionDropdown", + position: "right", + docsPluginId: "docs", + }, + { + type: "docsVersionDropdown", + position: "right", + docsPluginId: "karpor", + }, + { + type: "docsVersionDropdown", + position: "right", + docsPluginId: "kuperator", + }, + { + type: "docsVersionDropdown", + position: "right", + docsPluginId: "ctrlmesh", }, - - //{ - // type: 'docsVersionDropdown', - // position: 'right', - // dropdownActiveClassDisabled: true - //}, { - type: 'localeDropdown', - position: 'right', + type: "localeDropdown", + position: "right", dropdownItemsAfter: [ { - href: 'https://github.com/KusionStack/kusionstack.io/issues/25', - label: 'Help Us Translate', + href: "https://github.com/KusionStack/karpor/issues/468", + label: "Help Us Translate", }, ], }, { - href: 'https://github.com/KusionStack/kusion', - className: 'header-github-link', - 'aria-label': 'GitHub repository', - position: 'right', + href: "https://github.com/KusionStack", + className: "header-github-link", + "aria-label": "GitHub repository", + position: "right", }, - ].filter(item => true), + ].filter((item) => true), }, footer: { - style: 'dark', + style: "dark", links: [ { - title: 'Document', + title: "Document", items: [ { - label: 'Introduction', - to: '/docs/user_docs/intro/kusion-intro', + label: "Kusion", + to: "/docs", + 
}, + { + label: "Karpor", + to: "/karpor/", + }, + { + label: "Kuperator", + to: "/kuperator/introduction/", + }, + { + label: "CtrlMesh", + to: "/ctrlmesh/intro/", }, ], }, { - title: 'Resource', + title: "Resource", items: [ { - label: 'Blog', - to: '/blog', + label: "Blog", + to: "https://blog.kusionstack.io/", }, { - label: 'Github', - href: 'https://github.com/KusionStack', + label: "Github", + href: "https://github.com/KusionStack", }, { - label: 'Slack', - href: 'https://join.slack.com/t/kusionstack/shared_invite/zt-19lqcc3a9-_kTNwagaT5qwBE~my5Lnxg', + label: "Slack", + href: "https://join.slack.com/t/kusionstack/shared_invite/zt-19lqcc3a9-_kTNwagaT5qwBE~my5Lnxg", }, ], }, { - title: 'More', + title: "More", items: [ { - label: 'FAQ', - to: '/docs/user_docs/support', - }, - { - label: 'Changelog', - to: '/changelog', + label: "KCL", + to: "https://kcl-lang.io", }, ], }, ], - logo: { - alt: 'AntGroup Open Source Logo', - src: 'img/oss_logo.svg', - width: 160, - height: 51, - href: 'https://opensource.antgroup.com/', - }, - copyright: `Copyright © ${new Date().getFullYear()} KusionStack Authors`, + // logo: { + // alt: "AntGroup Open Source Logo", + // src: "img/oss_logo.svg", + // width: 160, + // height: 51, + // href: "https://opensource.antgroup.com/", + // }, + copyright: `Copyright © ${new Date().getFullYear()} KusionStack Authors. The Linux Foundation has registered trademarks and uses trademarks. For a list of trademarks of The Linux Foundation, please see our Trademark Usage page.`, }, prism: { theme: lightCodeTheme, darkTheme: darkCodeTheme, }, }), - - plugins: [ - [ - require.resolve('./src/plugins/changelog/index.js'), - { - blogTitle: 'KusionStack Changelog', - blogDescription: - 'Keep yourself up-to-date about new features in every release', - blogSidebarCount: 'ALL', - blogSidebarTitle: 'Changelog', - routeBasePath: '/changelog', - showReadingTime: false, - postsPerPage: 20, - archiveBasePath: null, - authorsMapPath: 'authors.json', - feedOptions: { - type: 'all', - title: 'KusionStack changelog', - description: - 'Keep yourself up-to-date about new features in every release', - copyright: `Copyright © ${new Date().getFullYear()} KusionStack Authors.`, - language: 'en', - }, - }, - ], - ], }; module.exports = config; diff --git a/i18n/en/code.json b/i18n/en/code.json index 2b74f537..84c5ac51 100644 --- a/i18n/en/code.json +++ b/i18n/en/code.json @@ -1,12 +1,67 @@ { + "Getting Started": { + "message": "Getting Started", + "description": "homepage getting started button" + }, + "Install": { + "message": "Install", + "description": "homepage install button" + }, + "home.quickstart": { + "message": "All About Your Modern Apps by Platform Engineering" + }, + "home.easyshipping": { + "message": "Dynamic Configuration Management" + }, + "home.easyshipping.1": { + "message": "Manage all application operations in one place, in a unified easy way" + }, + "home.easyshipping.2": { + "message": "Environment-agnostic application configurations" + }, + "home.easyshipping.3": { + "message": "Standardized and flexible platform configurations" + }, + "home.easyshipping.4": { + "message": "Kubernetes-first, lightweight and user-friendly" + }, + "home.platformengineering": { + "message": "Enable Developer Self-Service" + }, + "home.platformengineering.1": { + "message": "Fulfill the customized needs with reusable building blocks" + }, + "home.platformengineering.2": { + "message": "A growing open module ecosystem integrated with various cloud-native infrastructures" + }, + 
"home.platformengineering.3": { + "message": "An efficient collaboration paradigm between App Developers and Platform Engineers" + }, + "home.platformengineering.4": { + "message": "Building the golden path for end-to-end DevOps lifecycle management" + }, + "home.enterpriseops": { + "message": "Built-in Security and Compliance" + }, + "home.enterpriseops.1": { + "message": "From the first line of codes to production runtime" + }, + "home.enterpriseops.2": { + "message": "Codified shift-left validation to detect configuration risks" + }, + "home.enterpriseops.3": { + "message": "Extended check stages for workload lifecycle" + }, + "home.enterpriseops.4": { + "message": "Enterprise-grade fine-grained cluster control for Kubernetes" + }, + "home.whoisusing": { + "message": "Adopted by" + }, "theme.ErrorPageContent.title": { "message": "This page crashed.", "description": "The title of the fallback page when the page crashed" }, - "theme.ErrorPageContent.tryAgain": { - "message": "Try again", - "description": "The label of the button to try again when the page crashed" - }, "theme.NotFound.title": { "message": "Page Not Found", "description": "The title of the 404 page" @@ -19,25 +74,25 @@ "message": "Please contact the owner of the site that linked you to the original URL and let them know their link is broken.", "description": "The 2nd paragraph of the 404 page" }, - "theme.BackToTopButton.buttonAriaLabel": { - "message": "Scroll back to top", - "description": "The ARIA label for the back to top button" + "theme.admonition.note": { + "message": "note", + "description": "The default label used for the Note admonition (:::note)" }, - "theme.AnnouncementBar.closeButtonAriaLabel": { - "message": "Close", - "description": "The ARIA label for close button of announcement bar" + "theme.admonition.tip": { + "message": "tip", + "description": "The default label used for the Tip admonition (:::tip)" }, - "theme.blog.paginator.navAriaLabel": { - "message": "Blog list page navigation", - "description": "The ARIA label for the blog pagination" + "theme.admonition.danger": { + "message": "danger", + "description": "The default label used for the Danger admonition (:::danger)" }, - "theme.blog.paginator.newerEntries": { - "message": "Newer Entries", - "description": "The label used to navigate to the newer blog posts page (previous page)" + "theme.admonition.info": { + "message": "info", + "description": "The default label used for the Info admonition (:::info)" }, - "theme.blog.paginator.olderEntries": { - "message": "Older Entries", - "description": "The label used to navigate to the older blog posts page (next page)" + "theme.admonition.caution": { + "message": "caution", + "description": "The default label used for the Caution admonition (:::caution)" }, "theme.blog.archive.title": { "message": "Archive", @@ -47,17 +102,21 @@ "message": "Archive", "description": "The page & hero description of the blog archive page" }, - "theme.blog.post.readingTime.plurals": { - "message": "One min read|{readingTime} min read", - "description": "Pluralized label for \"{readingTime} min read\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + "theme.BackToTopButton.buttonAriaLabel": { + "message": "Scroll back to top", + "description": "The ARIA label for the back to top button" }, - "theme.blog.post.readMoreLabel": { - "message": "Read more about {title}", - "description": "The ARIA label for the link to full blog posts from excerpts" + "theme.blog.paginator.navAriaLabel": { + "message": "Blog list page navigation", + "description": "The ARIA label for the blog pagination" }, - "theme.blog.post.readMore": { - "message": "Read More", - "description": "The label used in blog post item excerpts to link to full blog posts" + "theme.blog.paginator.newerEntries": { + "message": "Newer Entries", + "description": "The label used to navigate to the newer blog posts page (previous page)" + }, + "theme.blog.paginator.olderEntries": { + "message": "Older Entries", + "description": "The label used to navigate to the older blog posts page (next page)" }, "theme.blog.post.paginator.navAriaLabel": { "message": "Blog post page navigation", @@ -83,22 +142,6 @@ "message": "View All Tags", "description": "The label of the link targeting the tag list page" }, - "theme.CodeBlock.copyButtonAriaLabel": { - "message": "Copy code to clipboard", - "description": "The ARIA label for copy code blocks button" - }, - "theme.CodeBlock.copied": { - "message": "Copied", - "description": "The copied button label on code blocks" - }, - "theme.CodeBlock.copy": { - "message": "Copy", - "description": "The copy button label on code blocks" - }, - "theme.blog.sidebar.navAriaLabel": { - "message": "Blog recent posts navigation", - "description": "The ARIA label for recent posts in the blog sidebar" - }, "theme.colorToggle.ariaLabel": { "message": "Switch between dark and light mode (currently {mode})", "description": "The ARIA label for the navbar color mode toggle" @@ -111,20 +154,16 @@ "message": "light mode", "description": "The name for the light color mode" }, + "theme.docs.breadcrumbs.navAriaLabel": { + "message": "Breadcrumbs", + "description": "The ARIA label for the breadcrumbs" + }, "theme.docs.DocCard.categoryDescription": { "message": "{count} items", "description": "The default description for a category card in the generated index about how many items this category includes" }, - "theme.docs.sidebar.expandButtonTitle": { - "message": "Expand sidebar", - "description": "The ARIA label and title attribute for expand button of doc sidebar" - }, - "theme.docs.sidebar.expandButtonAriaLabel": { - "message": "Expand sidebar", - "description": "The ARIA label and title attribute for expand button of doc sidebar" - }, "theme.docs.paginator.navAriaLabel": { - "message": "Docs pages navigation", + "message": "Docs pages", "description": "The ARIA label for the docs pagination" }, "theme.docs.paginator.previous": { @@ -135,18 +174,6 @@ "message": "Next", "description": "The label used to navigate to the next doc" }, - "theme.docs.sidebar.collapseButtonTitle": { - "message": "Collapse sidebar", - "description": "The title attribute for collapse button of doc sidebar" - }, - "theme.docs.sidebar.collapseButtonAriaLabel": { - "message": "Collapse sidebar", - "description": "The title attribute for collapse button of doc sidebar" - }, - "theme.DocSidebarItem.toggleCollapsedCategoryAriaLabel": { - "message": "Toggle the collapsible sidebar category '{label}'", - "description": "The ARIA label to toggle the 
collapsible sidebar category" - }, "theme.docs.tagDocListPageTitle.nDocsTagged": { "message": "One doc tagged|{count} docs tagged", "description": "Pluralized label for \"{count} docs tagged\". Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" @@ -179,7 +206,7 @@ "description": "The link label to edit the current page" }, "theme.common.headingLinkTitle": { - "message": "Direct link to heading", + "message": "Direct link to {heading}", "description": "Title for link to heading" }, "theme.lastUpdated.atDate": { @@ -194,36 +221,101 @@ "message": "Last updated{atDate}{byUser}", "description": "The sentence used to display when a page has been last updated, and by who" }, - "theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel": { - "message": "← Back to main menu", - "description": "The label of the back button to return to main menu, inside the mobile navbar sidebar secondary menu (notably used to display the docs sidebar)" - }, "theme.navbar.mobileVersionsDropdown.label": { "message": "Versions", "description": "The label for the navbar versions dropdown on mobile view" }, - "theme.common.skipToMainContent": { - "message": "Skip to main content", - "description": "The skip to content label used for accessibility, allowing to rapidly navigate to main content with keyboard tab/enter navigation" + "theme.tags.tagsListLabel": { + "message": "Tags:", + "description": "The label alongside a tag list" + }, + "theme.AnnouncementBar.closeButtonAriaLabel": { + "message": "Close", + "description": "The ARIA label for close button of announcement bar" + }, + "theme.blog.sidebar.navAriaLabel": { + "message": "Blog recent posts navigation", + "description": "The ARIA label for recent posts in the blog sidebar" + }, + "theme.CodeBlock.copied": { + "message": "Copied", + "description": "The copied button label on code blocks" + }, + "theme.CodeBlock.copyButtonAriaLabel": { + "message": "Copy code to clipboard", + "description": "The ARIA label for copy code blocks button" + }, + "theme.CodeBlock.copy": { + "message": "Copy", + "description": "The copy button label on code blocks" + }, + "theme.CodeBlock.wordWrapToggle": { + "message": "Toggle word wrap", + "description": "The title attribute for toggle word wrapping button of code block lines" + }, + "theme.DocSidebarItem.toggleCollapsedCategoryAriaLabel": { + "message": "Toggle the collapsible sidebar category '{label}'", + "description": "The ARIA label to toggle the collapsible sidebar category" + }, + "theme.NavBar.navAriaLabel": { + "message": "Main", + "description": "The ARIA label for the main navigation" }, "theme.TOCCollapsible.toggleButtonLabel": { "message": "On this page", "description": "The label used by the button on the collapsible TOC component" }, - "theme.tags.tagsListLabel": { - "message": "Tags:", - "description": "The label alongside a tag list" + "theme.blog.post.readMore": { + "message": "Read More", + "description": "The label used in blog post item excerpts to link to full blog posts" + }, + "theme.blog.post.readMoreLabel": { + "message": "Read more about {title}", + "description": "The ARIA label for the link to full blog posts from excerpts" + }, + "theme.blog.post.readingTime.plurals": { + "message": "One min read|{readingTime} min read", + "description": "Pluralized label for \"{readingTime} min read\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.docs.breadcrumbs.home": { + "message": "Home page", + "description": "The ARIA label for the home page in the breadcrumbs" }, "theme.navbar.mobileLanguageDropdown.label": { "message": "Languages", "description": "The label for the mobile language switcher dropdown" }, - "theme.SearchBar.seeAll": { - "message": "See all {count} results" + "theme.docs.sidebar.collapseButtonTitle": { + "message": "Collapse sidebar", + "description": "The title attribute for collapse button of doc sidebar" }, - "theme.SearchBar.label": { - "message": "Search", - "description": "The ARIA label and placeholder for search button" + "theme.docs.sidebar.collapseButtonAriaLabel": { + "message": "Collapse sidebar", + "description": "The title attribute for collapse button of doc sidebar" + }, + "theme.docs.sidebar.navAriaLabel": { + "message": "Docs sidebar", + "description": "The ARIA label for the sidebar navigation" + }, + "theme.docs.sidebar.closeSidebarButtonAriaLabel": { + "message": "Close navigation bar", + "description": "The ARIA label for close button of mobile sidebar" + }, + "theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel": { + "message": "← Back to main menu", + "description": "The label of the back button to return to main menu, inside the mobile navbar sidebar secondary menu (notably used to display the docs sidebar)" + }, + "theme.docs.sidebar.toggleSidebarButtonAriaLabel": { + "message": "Toggle navigation bar", + "description": "The ARIA label for hamburger menu button of mobile navigation" + }, + "theme.docs.sidebar.expandButtonTitle": { + "message": "Expand sidebar", + "description": "The ARIA label and title attribute for expand button of doc sidebar" + }, + "theme.docs.sidebar.expandButtonAriaLabel": { + "message": "Expand sidebar", + "description": "The ARIA label and title attribute for expand button of doc sidebar" }, "theme.SearchPage.documentsFound.plurals": { "message": "One document found|{count} documents found", @@ -257,38 +349,115 @@ "message": "Fetching new results...", "description": "The paragraph for fetching new search results" }, - "theme.tags.tagsPageTitle": { - "message": "Tags", - "description": "The title of the tag list page" + "theme.SearchBar.label": { + "message": "Search", + "description": "The ARIA label and placeholder for search button" + }, + "theme.SearchModal.searchBox.resetButtonTitle": { + "message": "Clear the query", + "description": "The label and ARIA label for search box reset button" + }, + "theme.SearchModal.searchBox.cancelButtonText": { + "message": "Cancel", + "description": "The label and ARIA label for search box cancel button" }, - "Quick Start - 15min ⏱️": { - "message": "Quick Start - 15min ⏱️" + "theme.SearchModal.startScreen.recentSearchesTitle": { + "message": "Recent", + "description": "The title for recent searches" }, - "Description will go into a meta tag in ": { - "message": "Description will go into a meta tag in " + "theme.SearchModal.startScreen.noRecentSearchesText": { + "message": "No recent searches", + "description": "The text when no recent searches" }, - "homepage.title": { - "message": "KusionStack" + "theme.SearchModal.startScreen.saveRecentSearchButtonTitle": { + "message": "Save this search", + "description": "The label for save recent search button" }, - "homepage.tagline": { - "message": "Codify modern delivery across Kubernetes and 
Clouds" + "theme.SearchModal.startScreen.removeRecentSearchButtonTitle": { + "message": "Remove this search from history", + "description": "The label for remove recent search button" }, - "homepage.feature.kcl": { - "message": "KCL" + "theme.SearchModal.startScreen.favoriteSearchesTitle": { + "message": "Favorite", + "description": "The title for favorite searches" }, - "homepage.feature.konfig": { - "message": "Konfig" + "theme.SearchModal.startScreen.removeFavoriteSearchButtonTitle": { + "message": "Remove this search from favorites", + "description": "The label for remove favorite search button" }, - "homepage.feature.kusion": { - "message": "Kusion Engine" + "theme.SearchModal.errorScreen.titleText": { + "message": "Unable to fetch results", + "description": "The title for error screen of search modal" }, - "homepage.feature.kcl.description": { - "message": "A constraint-based record & functional language mainly used in configuration and policy scenarios." + "theme.SearchModal.errorScreen.helpText": { + "message": "You might want to check your network connection.", + "description": "The help text for error screen of search modal" }, - "homepage.feature.konfig.description": { - "message": "Konfig is a monorepo that stores operation intentions described by KCL and provides a set of out-of-the-box Cloud Native application models, allowing users to quickly start the journey of cloud-native delivery." + "theme.SearchModal.footer.selectText": { + "message": "to select", + "description": "The explanatory text of the action for the enter key" }, - "homepage.feature.kusion.description": { - "message": "Kusion engine is to compile and deliver intents in Konfig to hybrid runtime on multi-cloud with less complexity and a consistent experience." + "theme.SearchModal.footer.selectKeyAriaLabel": { + "message": "Enter key", + "description": "The ARIA label for the Enter key button that makes the selection" + }, + "theme.SearchModal.footer.navigateText": { + "message": "to navigate", + "description": "The explanatory text of the action for the Arrow up and Arrow down key" + }, + "theme.SearchModal.footer.navigateUpKeyAriaLabel": { + "message": "Arrow up", + "description": "The ARIA label for the Arrow up key button that makes the navigation" + }, + "theme.SearchModal.footer.navigateDownKeyAriaLabel": { + "message": "Arrow down", + "description": "The ARIA label for the Arrow down key button that makes the navigation" + }, + "theme.SearchModal.footer.closeText": { + "message": "to close", + "description": "The explanatory text of the action for Escape key" + }, + "theme.SearchModal.footer.closeKeyAriaLabel": { + "message": "Escape key", + "description": "The ARIA label for the Escape key button that close the modal" + }, + "theme.SearchModal.footer.searchByText": { + "message": "Search by", + "description": "The text explain that the search is making by Algolia" + }, + "theme.SearchModal.noResultsScreen.noResultsText": { + "message": "No results for", + "description": "The text explains that there are no results for the following search" + }, + "theme.SearchModal.noResultsScreen.suggestedQueryText": { + "message": "Try searching for", + "description": "The text for the suggested query when no results are found for the following search" + }, + "theme.SearchModal.noResultsScreen.reportMissingResultsText": { + "message": "Believe this query should return results?", + "description": "The text for the question where the user thinks there are missing results" + }, + 
"theme.SearchModal.noResultsScreen.reportMissingResultsLinkText": { + "message": "Let us know.", + "description": "The text for the link to report missing results" + }, + "theme.SearchModal.placeholder": { + "message": "Search docs", + "description": "The placeholder of the input of the DocSearch pop-up modal" + }, + "theme.SearchBar.seeAll": { + "message": "See all {count} results" + }, + "theme.ErrorPageContent.tryAgain": { + "message": "Try again", + "description": "The label of the button to try again rendering when the React error boundary captures an error" + }, + "theme.common.skipToMainContent": { + "message": "Skip to main content", + "description": "The skip to content label used for accessibility, allowing to rapidly navigate to main content with keyboard tab/enter navigation" + }, + "theme.tags.tagsPageTitle": { + "message": "Tags", + "description": "The title of the tag list page" } } diff --git a/i18n/en/docusaurus-plugin-content-docs-community/current.json b/i18n/en/docusaurus-plugin-content-docs-community/current.json new file mode 100644 index 00000000..dd30528d --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-community/current.json @@ -0,0 +1,6 @@ +{ + "version.label": { + "message": "Next", + "description": "The label for version current" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-ctrlmesh/current.json b/i18n/en/docusaurus-plugin-content-docs-ctrlmesh/current.json new file mode 100644 index 00000000..7e461960 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-ctrlmesh/current.json @@ -0,0 +1,10 @@ +{ + "version.label": { + "message": "v0.2 🚧", + "description": "The label for version current" + }, + "sidebar.ctrlmesh.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar ctrlmesh" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-ctrlmesh/version-v0.1.json b/i18n/en/docusaurus-plugin-content-docs-ctrlmesh/version-v0.1.json new file mode 100644 index 00000000..08190601 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-ctrlmesh/version-v0.1.json @@ -0,0 +1,10 @@ +{ + "version.label": { + "message": "v0.1", + "description": "The label for version v0.1" + }, + "sidebar.ctrlmesh.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar ctrlmesh" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-docs/current.json b/i18n/en/docusaurus-plugin-content-docs-docs/current.json new file mode 100644 index 00000000..427f5ca2 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-docs/current.json @@ -0,0 +1,122 @@ +{ + "version.label": { + "message": "v0.14 🚧", + "description": "The label for version current" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? 
in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Project": { + "message": "Project", + "description": "The label for category Project in sidebar kusion" + }, + "sidebar.kusion.category.Stack": { + "message": "Stack", + "description": "The label for category Stack in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Module": { + "message": "Kusion Module", + "description": "The label for category Kusion Module in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + "sidebar.kusion.category.Developer Schemas": { + "message": "Developer Schemas", + "description": "The label for category Developer Schemas in sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + "description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + }, + "sidebar.kusion.category.Projects": { + 
"message": "Projects", + "description": "The label for category Projects in sidebar kusion" + }, + "sidebar.kusion.category.Stacks": { + "message": "Stacks", + "description": "The label for category Stacks in sidebar kusion" + }, + "sidebar.kusion.category.Modules": { + "message": "Modules", + "description": "The label for category Modules in sidebar kusion" + }, + "sidebar.kusion.category.Production Practice Case": { + "message": "Production Practice Case", + "description": "The label for category Production Practice Case in sidebar kusion" + }, + "sidebar.kusion.category.LLM Ops": { + "message": "LLM Ops", + "description": "The label for category LLM Ops in sidebar kusion" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.10.json b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.10.json new file mode 100644 index 00000000..4ff8b1cc --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.10.json @@ -0,0 +1,110 @@ +{ + "version.label": { + "message": "v0.10", + "description": "The label for version v0.10" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Project": { + "message": "Project", + "description": "The label for category Project in sidebar kusion" + }, + "sidebar.kusion.category.Stack": { + "message": "Stack", + "description": "The label for category Stack in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.GitHub Actions": { + "message": "GitHub Actions", + "description": "The label for category GitHub Actions in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + "sidebar.kusion.category.Catalog Models": { + "message": "Catalog Models", + "description": "The label for category Catalog Models in 
sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.network": { + "message": "network", + "description": "The label for category network in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + "description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.trait": { + "message": "trait", + "description": "The label for category trait in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.11.json b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.11.json new file mode 100644 index 00000000..7d892af9 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.11.json @@ -0,0 +1,102 @@ +{ + "version.label": { + "message": "v0.11", + "description": "The label for version v0.11" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? 
in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Project": { + "message": "Project", + "description": "The label for category Project in sidebar kusion" + }, + "sidebar.kusion.category.Stack": { + "message": "Stack", + "description": "The label for category Stack in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Module": { + "message": "Kusion Module", + "description": "The label for category Kusion Module in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + "sidebar.kusion.category.Developer Schemas": { + "message": "Developer Schemas", + "description": "The label for category Developer Schemas in sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + "description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git 
a/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.12.json b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.12.json new file mode 100644 index 00000000..3236c27d --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.12.json @@ -0,0 +1,106 @@ +{ + "version.label": { + "message": "v0.12", + "description": "The label for version v0.12" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Projects": { + "message": "Projects", + "description": "The label for category Projects in sidebar kusion" + }, + "sidebar.kusion.category.Stacks": { + "message": "Stacks", + "description": "The label for category Stacks in sidebar kusion" + }, + "sidebar.kusion.category.Modules": { + "message": "Modules", + "description": "The label for category Modules in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Production Practice Case": { + "message": "Production Practice Case", + "description": "The label for category Production Practice Case in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + "sidebar.kusion.category.Developer Schemas": { + "message": "Developer Schemas", + "description": "The label for category Developer Schemas in sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + 
"description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.13.json b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.13.json new file mode 100644 index 00000000..f4c4a482 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.13.json @@ -0,0 +1,110 @@ +{ + "version.label": { + "message": "v0.13", + "description": "The label for version v0.13" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Projects": { + "message": "Projects", + "description": "The label for category Projects in sidebar kusion" + }, + "sidebar.kusion.category.Stacks": { + "message": "Stacks", + "description": "The label for category Stacks in sidebar kusion" + }, + "sidebar.kusion.category.Modules": { + "message": "Modules", + "description": "The label for category Modules in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Production Practice Case": { + "message": "Production Practice Case", + "description": "The label for category Production Practice Case in sidebar kusion" + }, + "sidebar.kusion.category.LLM Ops": { + "message": "LLM Ops", + "description": "The label for category LLM Ops in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + 
"sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + "sidebar.kusion.category.Developer Schemas": { + "message": "Developer Schemas", + "description": "The label for category Developer Schemas in sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + "description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.9.json b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.9.json new file mode 100644 index 00000000..b0fcd5d3 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-docs/version-v0.9.json @@ -0,0 +1,110 @@ +{ + "version.label": { + "message": "v0.9", + "description": "The label for version v0.9" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? 
in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guide": { + "message": "User Guide", + "description": "The label for category User Guide in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.GitHub Actions": { + "message": "GitHub Actions", + "description": "The label for category GitHub Actions in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Command Line Tools": { + "message": "Command Line Tools", + "description": "The label for category Command Line Tools in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Backend Configuration": { + "message": "Backend Configuration", + "description": "The label for category Backend Configuration in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Model Library": { + "message": "Kusion Model Library", + "description": "The label for category Kusion Model Library in sidebar kusion" + }, + "sidebar.kusion.category.Catalog Models": { + "message": "Catalog Models", + "description": "The label for category Catalog Models in sidebar kusion" + }, + "sidebar.kusion.category.Workload": { + "message": "Workload", + "description": "The label for category Workload in sidebar kusion" + }, + "sidebar.kusion.category.Database": { + "message": "Database", + "description": "The label for category Database in sidebar kusion" + }, + "sidebar.kusion.category.Monitoring": { + "message": "Monitoring", + "description": "The label for category Monitoring in sidebar kusion" + }, + "sidebar.kusion.category.Trait": { + "message": "Trait", + "description": "The label for category Trait in sidebar kusion" + }, + "sidebar.kusion.category.Internal": { + "message": "Internal", + "description": "The label for category Internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.lifecycle": { + "message": "lifecycle", + "description": "The label for category lifecycle in sidebar kusion" + }, + "sidebar.kusion.category.probe": { + "message": "probe", + "description": "The label for category probe in sidebar kusion" + }, + "sidebar.kusion.category.network": { + "message": "network", + "description": "The label for category network in sidebar kusion" + }, + 
"sidebar.kusion.category.secret": { + "message": "secret", + "description": "The label for category secret in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-karpor/current.json b/i18n/en/docusaurus-plugin-content-docs-karpor/current.json new file mode 100644 index 00000000..22942fe9 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-karpor/current.json @@ -0,0 +1,46 @@ +{ + "version.label": { + "message": "v0.6 🚧", + "description": "The label for version current" + }, + "sidebar.karpor.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar karpor" + }, + "sidebar.karpor.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar karpor" + }, + "sidebar.karpor.category.User Guide": { + "message": "User Guide", + "description": "The label for category User Guide in sidebar karpor" + }, + "sidebar.karpor.category.How to Insight": { + "message": "How to Insight", + "description": "The label for category How to Insight in sidebar karpor" + }, + "sidebar.karpor.category.Best Production Practices": { + "message": "Best Production Practices", + "description": "The label for category Best Production Practices in sidebar karpor" + }, + "sidebar.karpor.category.Developer Guide": { + "message": "Developer Guide", + "description": "The label for category Developer Guide in sidebar karpor" + }, + "sidebar.karpor.category.Contribution Guide": { + "message": "Contribution Guide", + "description": "The label for category Contribution Guide in sidebar karpor" + }, + "sidebar.karpor.category.Conventions": { + "message": "Conventions", + "description": "The label for category Conventions in sidebar karpor" + }, + "sidebar.karpor.category.References": { + "message": "References", + "description": "The label for category References in sidebar karpor" + }, + "sidebar.karpor.category.CLI Commands": { + "message": "CLI Commands", + "description": "The label for category CLI Commands in sidebar karpor" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-karpor/version-v0.4.json b/i18n/en/docusaurus-plugin-content-docs-karpor/version-v0.4.json new file mode 100644 index 00000000..6c7b646a --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-karpor/version-v0.4.json @@ -0,0 +1,46 @@ +{ + "version.label": { + "message": "v0.4", + "description": "The label for version v0.4" + }, + "sidebar.karpor.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar karpor" + }, + "sidebar.karpor.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar karpor" + }, + "sidebar.karpor.category.User Guide": { + "message": "User Guide", + "description": "The label for category User Guide in sidebar karpor" + }, + "sidebar.karpor.category.How to Insight": { + "message": "How to Insight", + "description": "The label for category How to Insight in sidebar karpor" + }, + "sidebar.karpor.category.Best Production Practices": { + "message": "Best Production Practices", + "description": "The label for category Best Production Practices in sidebar karpor" + }, + "sidebar.karpor.category.Developer Guide": { + "message": "Developer Guide", + "description": "The label for category Developer Guide in sidebar karpor" + }, + 
"sidebar.karpor.category.Contribution Guide": { + "message": "Contribution Guide", + "description": "The label for category Contribution Guide in sidebar karpor" + }, + "sidebar.karpor.category.Conventions": { + "message": "Conventions", + "description": "The label for category Conventions in sidebar karpor" + }, + "sidebar.karpor.category.References": { + "message": "References", + "description": "The label for category References in sidebar karpor" + }, + "sidebar.karpor.category.CLI Commands": { + "message": "CLI Commands", + "description": "The label for category CLI Commands in sidebar karpor" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-karpor/version-v0.5.json b/i18n/en/docusaurus-plugin-content-docs-karpor/version-v0.5.json new file mode 100644 index 00000000..783ce4f7 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-karpor/version-v0.5.json @@ -0,0 +1,46 @@ +{ + "version.label": { + "message": "v0.5", + "description": "The label for version v0.5" + }, + "sidebar.karpor.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar karpor" + }, + "sidebar.karpor.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar karpor" + }, + "sidebar.karpor.category.User Guide": { + "message": "User Guide", + "description": "The label for category User Guide in sidebar karpor" + }, + "sidebar.karpor.category.How to Insight": { + "message": "How to Insight", + "description": "The label for category How to Insight in sidebar karpor" + }, + "sidebar.karpor.category.Best Production Practices": { + "message": "Best Production Practices", + "description": "The label for category Best Production Practices in sidebar karpor" + }, + "sidebar.karpor.category.Developer Guide": { + "message": "Developer Guide", + "description": "The label for category Developer Guide in sidebar karpor" + }, + "sidebar.karpor.category.Contribution Guide": { + "message": "Contribution Guide", + "description": "The label for category Contribution Guide in sidebar karpor" + }, + "sidebar.karpor.category.Conventions": { + "message": "Conventions", + "description": "The label for category Conventions in sidebar karpor" + }, + "sidebar.karpor.category.References": { + "message": "References", + "description": "The label for category References in sidebar karpor" + }, + "sidebar.karpor.category.CLI Commands": { + "message": "CLI Commands", + "description": "The label for category CLI Commands in sidebar karpor" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-operating/current.json b/i18n/en/docusaurus-plugin-content-docs-operating/current.json new file mode 100644 index 00000000..39507407 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-operating/current.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "v0.5 🚧", + "description": "The label for version current" + }, + "sidebar.kuperator.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kuperator" + }, + "sidebar.kuperator.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kuperator" + }, + "sidebar.kuperator.category.Manuals": { + "message": "Manuals", + "description": "The label for category Manuals in sidebar kuperator" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-operating/version-v0.3.json b/i18n/en/docusaurus-plugin-content-docs-operating/version-v0.3.json 
new file mode 100644 index 00000000..819a0506 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-operating/version-v0.3.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "v0.3", + "description": "The label for version v0.3" + }, + "sidebar.kuperator.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kuperator" + }, + "sidebar.kuperator.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kuperator" + }, + "sidebar.kuperator.category.Manuals": { + "message": "Manuals", + "description": "The label for category Manuals in sidebar kuperator" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs-operating/version-v0.4.json b/i18n/en/docusaurus-plugin-content-docs-operating/version-v0.4.json new file mode 100644 index 00000000..ea53bd98 --- /dev/null +++ b/i18n/en/docusaurus-plugin-content-docs-operating/version-v0.4.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "v0.4", + "description": "The label for version v0.4" + }, + "sidebar.kuperator.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kuperator" + }, + "sidebar.kuperator.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kuperator" + }, + "sidebar.kuperator.category.Manuals": { + "message": "Manuals", + "description": "The label for category Manuals in sidebar kuperator" + } +} diff --git a/i18n/en/docusaurus-plugin-content-docs/current.json b/i18n/en/docusaurus-plugin-content-docs/current.json deleted file mode 100644 index 1a5aa546..00000000 --- a/i18n/en/docusaurus-plugin-content-docs/current.json +++ /dev/null @@ -1,706 +0,0 @@ -{ - "version.label": { - "message": "Next", - "description": "The label for version current" - }, - "sidebar.docs.category.参考手册": { - "message": "Reference", - "description": "The label for category 参考手册 in sidebar docs" - }, - "sidebar.docs.category.命令行工具": { - "message": "Command Line Tools", - "description": "The label for category 命令行工具 in sidebar docs" - }, - "sidebar.docs.category.Kusion 工具": { - "message": "Kusion Tools", - "description": "The label for category Kusion 工具 in sidebar docs" - }, - "sidebar.docs.category.KCL 语言工具": { - "message": "KCL Tools", - "description": "The label for category KCL 语言工具 in sidebar docs" - }, - "sidebar.docs.category.OpenAPI 工具": { - "message": "OpenAPI Tools", - "description": "The label for category OpenAPI 工具 in sidebar docs" - }, - "sidebar.docs.category.Kusion 模型库": { - "message": "Kusion Model", - "description": "The label for category Kusion 模型库 in sidebar docs" - }, - "sidebar.docs.category.核心模型库": { - "message": "Core Model", - "description": "The label for category 核心模型库 in sidebar docs" - }, - "sidebar.docs.category.kusion_kubernetes": { - "message": "kusion_kubernetes", - "description": "The label for category kusion_kubernetes in sidebar docs" - }, - "sidebar.docs.category.api": { - "message": "api", - "description": "The label for category api in sidebar docs" - }, - "sidebar.docs.category.admissionregistration": { - "message": "admissionregistration", - "description": "The label for category admissionregistration in sidebar docs" - }, - "sidebar.docs.category.v1": { - "message": "v1", - "description": "The label for category v1 in sidebar docs" - }, - "sidebar.docs.category.apps": { - "message": "apps", - "description": "The label for category apps in 
sidebar docs" - }, - "sidebar.docs.category.autoscaling": { - "message": "autoscaling", - "description": "The label for category autoscaling in sidebar docs" - }, - "sidebar.docs.category.batch": { - "message": "batch", - "description": "The label for category batch in sidebar docs" - }, - "sidebar.docs.category.v1beta1": { - "message": "v1beta1", - "description": "The label for category v1beta1 in sidebar docs" - }, - "sidebar.docs.category.core": { - "message": "core", - "description": "The label for category core in sidebar docs" - }, - "sidebar.docs.category.networking": { - "message": "networking", - "description": "The label for category networking in sidebar docs" - }, - "sidebar.docs.category.rbac": { - "message": "rbac", - "description": "The label for category rbac in sidebar docs" - }, - "sidebar.docs.category.apimachinery": { - "message": "apimachinery", - "description": "The label for category apimachinery in sidebar docs" - }, - "sidebar.docs.category.apis": { - "message": "apis", - "description": "The label for category apis in sidebar docs" - }, - "sidebar.docs.category.kusion_models": { - "message": "kusion_models", - "description": "The label for category kusion_models in sidebar docs" - }, - "sidebar.docs.category.kube": { - "message": "kube", - "description": "The label for category kube in sidebar docs" - }, - "sidebar.docs.category.frontend": { - "message": "frontend", - "description": "The label for category frontend in sidebar docs" - }, - "sidebar.docs.category.common": { - "message": "common", - "description": "The label for category common in sidebar docs" - }, - "sidebar.docs.category.configmap": { - "message": "configmap", - "description": "The label for category configmap in sidebar docs" - }, - "sidebar.docs.category.container": { - "message": "container", - "description": "The label for category container in sidebar docs" - }, - "sidebar.docs.category.env": { - "message": "env", - "description": "The label for category env in sidebar docs" - }, - "sidebar.docs.category.lifecycle": { - "message": "lifecycle", - "description": "The label for category lifecycle in sidebar docs" - }, - "sidebar.docs.category.port": { - "message": "port", - "description": "The label for category port in sidebar docs" - }, - "sidebar.docs.category.probe": { - "message": "probe", - "description": "The label for category probe in sidebar docs" - }, - "sidebar.docs.category.ingress": { - "message": "ingress", - "description": "The label for category ingress in sidebar docs" - }, - "sidebar.docs.category.resource": { - "message": "resource", - "description": "The label for category resource in sidebar docs" - }, - "sidebar.docs.category.secret": { - "message": "secret", - "description": "The label for category secret in sidebar docs" - }, - "sidebar.docs.category.service": { - "message": "service", - "description": "The label for category service in sidebar docs" - }, - "sidebar.docs.category.serviceaccount": { - "message": "serviceaccount", - "description": "The label for category serviceaccount in sidebar docs" - }, - "sidebar.docs.category.sidecar": { - "message": "sidecar", - "description": "The label for category sidecar in sidebar docs" - }, - "sidebar.docs.category.strategy": { - "message": "strategy", - "description": "The label for category strategy in sidebar docs" - }, - "sidebar.docs.category.volume": { - "message": "volume", - "description": "The label for category volume in sidebar docs" - }, - "sidebar.docs.category.Konfig 配置大库": { - "message": "Konfig 配置大库", - 
"description": "The label for category Konfig 配置大库 in sidebar docs" - }, - "sidebar.docs.category.develop": { - "message": "develop", - "description": "The label for category develop in sidebar docs" - }, - "sidebar.docs.category.构建 Kusion": { - "message": "构建 Kusion", - "description": "The label for category 构建 Kusion in sidebar docs" - }, - "sidebar.docs.category.设计与实现": { - "message": "设计与实现", - "description": "The label for category 设计与实现 in sidebar docs" - }, - "sidebar.docs.category.governance": { - "message": "governance", - "description": "The label for category governance in sidebar docs" - }, - "sidebar.docs.category.开源社区": { - "message": "Open Source Community", - "description": "The label for category 开源社区 in sidebar docs" - }, - "sidebar.docs.category.发版策略": { - "message": "Release Policy", - "description": "The label for category 发版策略 in sidebar docs" - }, - "sidebar.docs.category.贡献指南": { - "message": "Contribution Guide", - "description": "The label for category 贡献指南 in sidebar docs" - }, - "sidebar.docs.category.user_docs": { - "message": "user_docs", - "description": "The label for category user_docs in sidebar docs" - }, - "sidebar.docs.category.简介": { - "message": "Intro", - "description": "The label for category 简介 in sidebar docs" - }, - "sidebar.docs.category.快速开始": { - "message": "Quick Start", - "description": "The label for category 快速开始 in sidebar docs" - }, - "sidebar.docs.category.下载和安装": { - "message": "Download and Install", - "description": "The label for category 下载和安装 in sidebar docs" - }, - "sidebar.docs.category.架构 & 概念": { - "message": "Architecture & Concepts", - "description": "The label for category 架构 & 概念 in sidebar docs" - }, - "sidebar.docs.category.用户指南": { - "message": "User Guide", - "description": "The label for category 用户指南 in sidebar docs" - }, - "sidebar.docs.category.开始 Kusion": { - "message": "Begin Kusion", - "description": "The label for category 开始 Kusion in sidebar docs" - }, - "sidebar.docs.category.Kubernetes": { - "message": "Kubernetes", - "description": "The label for category Kubernetes in sidebar docs" - }, - "sidebar.docs.category.项目组织最佳实践": { - "message": "Project Organization Best Practices", - "description": "The label for category 项目组织最佳实践 in sidebar docs" - }, - "sidebar.docs.category.ArgoCD": { - "message": "ArgoCD", - "description": "The label for category ArgoCD in sidebar docs" - }, - "sidebar.docs.category.敏感信息管理": { - "message": "Information security", - "description": "The label for category 敏感信息管理 in sidebar docs" - }, - "sidebar.user_docs.category.简介": { - "message": "Intro", - "description": "The label for category 简介 in sidebar user_docs" - }, - "sidebar.user_docs.category.快速开始": { - "message": "Quick Start", - "description": "The label for category 快速开始 in sidebar user_docs" - }, - "sidebar.user_docs.category.下载和安装": { - "message": "Download and Install", - "description": "The label for category 下载和安装 in sidebar user_docs" - }, - "sidebar.user_docs.category.架构 & 概念": { - "message": "Architecture & Concepts", - "description": "The label for category 架构 & 概念 in sidebar user_docs" - }, - "sidebar.user_docs.category.用户指南": { - "message": "User Guide", - "description": "The label for category 用户指南 in sidebar user_docs" - }, - "sidebar.user_docs.category.开始 Kusion": { - "message": "Begin Kusion", - "description": "The label for category 开始 Kusion in sidebar user_docs" - }, - "sidebar.user_docs.category.Kubernetes": { - "message": "Kubernetes", - "description": "The label for category Kubernetes in sidebar 
user_docs" - }, - "sidebar.user_docs.category.项目组织最佳实践": { - "message": "Project Organization Best Practices", - "description": "The label for category 项目组织最佳实践 in sidebar user_docs" - }, - "sidebar.user_docs.category.ArgoCD": { - "message": "ArgoCD", - "description": "The label for category ArgoCD in sidebar user_docs" - }, - "sidebar.user_docs.category.敏感信息管理": { - "message": "Information security", - "description": "The label for category 敏感信息管理 in sidebar user_docs" - }, - "sidebar.reference.category.命令行工具": { - "message": "Command Line Tools", - "description": "The label for category 命令行工具 in sidebar reference" - }, - "sidebar.reference.category.Kusion 工具": { - "message": "Kusion Tools", - "description": "The label for category Kusion 工具 in sidebar reference" - }, - "sidebar.reference.category.KCL 语言工具": { - "message": "KCL Tools", - "description": "The label for category KCL 语言工具 in sidebar reference" - }, - "sidebar.reference.category.OpenAPI 工具": { - "message": "OpenAPI Tools", - "description": "The label for category OpenAPI 工具 in sidebar reference" - }, - "sidebar.reference.category.Kusion 模型库": { - "message": "Kusion Model", - "description": "The label for category Kusion 模型库 in sidebar reference" - }, - "sidebar.reference.category.核心模型库": { - "message": "Code Model", - "description": "The label for category 核心模型库 in sidebar reference" - }, - "sidebar.reference.category.kusion_kubernetes": { - "message": "kusion_kubernetes", - "description": "The label for category kusion_kubernetes in sidebar reference" - }, - "sidebar.reference.category.api": { - "message": "api", - "description": "The label for category api in sidebar reference" - }, - "sidebar.reference.category.admissionregistration": { - "message": "admissionregistration", - "description": "The label for category admissionregistration in sidebar reference" - }, - "sidebar.reference.category.v1": { - "message": "v1", - "description": "The label for category v1 in sidebar reference" - }, - "sidebar.reference.category.apps": { - "message": "apps", - "description": "The label for category apps in sidebar reference" - }, - "sidebar.reference.category.autoscaling": { - "message": "autoscaling", - "description": "The label for category autoscaling in sidebar reference" - }, - "sidebar.reference.category.batch": { - "message": "batch", - "description": "The label for category batch in sidebar reference" - }, - "sidebar.reference.category.v1beta1": { - "message": "v1beta1", - "description": "The label for category v1beta1 in sidebar reference" - }, - "sidebar.reference.category.core": { - "message": "core", - "description": "The label for category core in sidebar reference" - }, - "sidebar.reference.category.networking": { - "message": "networking", - "description": "The label for category networking in sidebar reference" - }, - "sidebar.reference.category.rbac": { - "message": "rbac", - "description": "The label for category rbac in sidebar reference" - }, - "sidebar.reference.category.apimachinery": { - "message": "apimachinery", - "description": "The label for category apimachinery in sidebar reference" - }, - "sidebar.reference.category.apis": { - "message": "apis", - "description": "The label for category apis in sidebar reference" - }, - "sidebar.reference.category.kusion_models": { - "message": "kusion_models", - "description": "The label for category kusion_models in sidebar reference" - }, - "sidebar.reference.category.kube": { - "message": "kube", - "description": "The label for category kube in sidebar reference" - }, - 
"sidebar.reference.category.frontend": { - "message": "frontend", - "description": "The label for category frontend in sidebar reference" - }, - "sidebar.reference.category.common": { - "message": "common", - "description": "The label for category common in sidebar reference" - }, - "sidebar.reference.category.configmap": { - "message": "configmap", - "description": "The label for category configmap in sidebar reference" - }, - "sidebar.reference.category.container": { - "message": "container", - "description": "The label for category container in sidebar reference" - }, - "sidebar.reference.category.env": { - "message": "env", - "description": "The label for category env in sidebar reference" - }, - "sidebar.reference.category.lifecycle": { - "message": "lifecycle", - "description": "The label for category lifecycle in sidebar reference" - }, - "sidebar.reference.category.port": { - "message": "port", - "description": "The label for category port in sidebar reference" - }, - "sidebar.reference.category.probe": { - "message": "probe", - "description": "The label for category probe in sidebar reference" - }, - "sidebar.reference.category.ingress": { - "message": "ingress", - "description": "The label for category ingress in sidebar reference" - }, - "sidebar.reference.category.resource": { - "message": "resource", - "description": "The label for category resource in sidebar reference" - }, - "sidebar.reference.category.secret": { - "message": "secret", - "description": "The label for category secret in sidebar reference" - }, - "sidebar.reference.category.service": { - "message": "service", - "description": "The label for category service in sidebar reference" - }, - "sidebar.reference.category.serviceaccount": { - "message": "serviceaccount", - "description": "The label for category serviceaccount in sidebar reference" - }, - "sidebar.reference.category.sidecar": { - "message": "sidecar", - "description": "The label for category sidecar in sidebar reference" - }, - "sidebar.reference.category.strategy": { - "message": "strategy", - "description": "The label for category strategy in sidebar reference" - }, - "sidebar.reference.category.volume": { - "message": "volume", - "description": "The label for category volume in sidebar reference" - }, - "sidebar.reference.category.Konfig 配置大库": { - "message": "Konfig Lib", - "description": "The label for category Konfig 配置大库 in sidebar reference" - }, - "sidebar.develop.category.构建 Kusion": { - "message": "Build Kusion", - "description": "The label for category 构建 Kusion in sidebar develop" - }, - "sidebar.develop.category.设计与实现": { - "message": "Design and Implementation", - "description": "The label for category 设计与实现 in sidebar develop" - }, - "sidebar.governance.category.开源社区": { - "message": "Open Source Community", - "description": "The label for category 开源社区 in sidebar governance" - }, - "sidebar.governance.category.发版策略": { - "message": "Release Policy", - "description": "The label for category 发版策略 in sidebar governance" - }, - "sidebar.governance.category.贡献指南": { - "message": "Contribution Guide", - "description": "The label for category 贡献指南 in sidebar governance" - }, - "sidebar.governance.link.changelog": { - "message": "changelog", - "description": "The label for link changelog in sidebar governance, linking to /changelog" - }, - "sidebar.docs.category.KCL": { - "message": "KCL", - "description": "The label for category KCL in sidebar docs" - }, - "sidebar.docs.category.Code Lab": { - "message": "Code Lab", - "description": "The 
label for category Code Lab in sidebar docs" - }, - "sidebar.docs.category.Spec": { - "message": "Spec", - "description": "The label for category Spec in sidebar docs" - }, - "sidebar.docs.category.Errors and Warnings": { - "message": "Errors and Warnings", - "description": "The label for category Errors and Warnings in sidebar docs" - }, - "sidebar.docs.category.Multi-Language": { - "message": "Multi-Language", - "description": "The label for category Multi-Language in sidebar docs" - }, - "sidebar.docs.category.Advanced": { - "message": "Advanced", - "description": "The label for category Advanced in sidebar docs" - }, - "sidebar.docs.category.Plugin System": { - "message": "Plugin System", - "description": "The label for category Plugin System in sidebar docs" - }, - "sidebar.reference.category.KCL": { - "message": "KCL", - "description": "The label for category KCL in sidebar reference" - }, - "sidebar.reference.category.Code Lab": { - "message": "Code Lab", - "description": "The label for category Code Lab in sidebar reference" - }, - "sidebar.reference.category.Spec": { - "message": "Spec", - "description": "The label for category Spec in sidebar reference" - }, - "sidebar.reference.category.Errors and Warnings": { - "message": "Errors and Warnings", - "description": "The label for category Errors and Warnings in sidebar reference" - }, - "sidebar.reference.category.Multi-Language": { - "message": "Multi-Language", - "description": "The label for category Multi-Language in sidebar reference" - }, - "sidebar.reference.category.Advanced": { - "message": "Advanced", - "description": "The label for category Advanced in sidebar reference" - }, - "sidebar.reference.category.Plugin System": { - "message": "Plugin System", - "description": "The label for category Plugin System in sidebar reference" - }, - "sidebar.docs.category.FAQ": { - "message": "FAQ", - "description": "The label for category FAQ in sidebar docs" - }, - "sidebar.user_docs.category.FAQ": { - "message": "FAQ", - "description": "The label for category FAQ in sidebar user_docs" - }, - "sidebar.docs.category. System Module": { - "message": " System Module", - "description": "The label for category System Module in sidebar docs" - }, - "sidebar.reference.category. 
System Module": { - "message": " System Module", - "description": "The label for category System Module in sidebar reference" - }, - "sidebar.docs.category.Events": { - "message": "Events", - "description": "The label for category Events in sidebar docs" - }, - "sidebar.docs.category.2022": { - "message": "2022", - "description": "The label for category 2022 in sidebar docs" - }, - "sidebar.docs.category.Reference": { - "message": "Reference", - "description": "The label for category Reference in sidebar docs" - }, - "sidebar.docs.category.Command Line Tools": { - "message": "Command Line Tools", - "description": "The label for category Command Line Tools in sidebar docs" - }, - "sidebar.docs.category.Kusion Tools": { - "message": "Kusion Tools", - "description": "The label for category Kusion Tools in sidebar docs" - }, - "sidebar.docs.category.KCL Tools": { - "message": "KCL Tools", - "description": "The label for category KCL Tools in sidebar docs" - }, - "sidebar.docs.category.OpenAPI Tools": { - "message": "OpenAPI Tools", - "description": "The label for category OpenAPI Tools in sidebar docs" - }, - "sidebar.docs.category.Use Case": { - "message": "Use Case", - "description": "The label for category Use Case in sidebar docs" - }, - "sidebar.docs.category.Kusion Model Library": { - "message": "Kusion Model Library", - "description": "The label for category Kusion Model Library in sidebar docs" - }, - "sidebar.docs.category.Core Model Library": { - "message": "Core Model Library", - "description": "The label for category Core Model Library in sidebar docs" - }, - "sidebar.docs.category.Konfig": { - "message": "Konfig", - "description": "The label for category Konfig in sidebar docs" - }, - "sidebar.docs.category.Build Kusion": { - "message": "Build Kusion", - "description": "The label for category Build Kusion in sidebar docs" - }, - "sidebar.docs.category.Design and Implementation": { - "message": "Design and Implementation", - "description": "The label for category Design and Implementation in sidebar docs" - }, - "sidebar.docs.category.Community": { - "message": "Community", - "description": "The label for category Community in sidebar docs" - }, - "sidebar.docs.category.Release Policy": { - "message": "Release Policy", - "description": "The label for category Release Policy in sidebar docs" - }, - "sidebar.docs.category.Contribution Guide": { - "message": "Contribution Guide", - "description": "The label for category Contribution Guide in sidebar docs" - }, - "sidebar.docs.category.Introduction": { - "message": "Introduction", - "description": "The label for category Introduction in sidebar docs" - }, - "sidebar.docs.category.Get Started": { - "message": "Get Started", - "description": "The label for category Get Started in sidebar docs" - }, - "sidebar.docs.category.Download & Install": { - "message": "Download & Install", - "description": "The label for category Download & Install in sidebar docs" - }, - "sidebar.docs.category.Architecture & Concepts": { - "message": "Architecture & Concepts", - "description": "The label for category Architecture & Concepts in sidebar docs" - }, - "sidebar.docs.category.User Guide": { - "message": "User Guide", - "description": "The label for category User Guide in sidebar docs" - }, - "sidebar.docs.category.Adopting KusionStack": { - "message": "Adopting KusionStack", - "description": "The label for category Adopting KusionStack in sidebar docs" - }, - "sidebar.docs.category.Project Best Practices": { - "message": "Project Best Practices", - 
"description": "The label for category Project Best Practices in sidebar docs" - }, - "sidebar.docs.category.Manage Sensitive Information": { - "message": "Manage Sensitive Information", - "description": "The label for category Manage Sensitive Information in sidebar docs" - }, - "sidebar.user_docs.category.Introduction": { - "message": "Introduction", - "description": "The label for category Introduction in sidebar user_docs" - }, - "sidebar.user_docs.category.Get Started": { - "message": "Get Started", - "description": "The label for category Get Started in sidebar user_docs" - }, - "sidebar.user_docs.category.Download & Install": { - "message": "Download & Install", - "description": "The label for category Download & Install in sidebar user_docs" - }, - "sidebar.user_docs.category.Architecture & Concepts": { - "message": "Architecture & Concepts", - "description": "The label for category Architecture & Concepts in sidebar user_docs" - }, - "sidebar.user_docs.category.User Guide": { - "message": "User Guide", - "description": "The label for category User Guide in sidebar user_docs" - }, - "sidebar.user_docs.category.Adopting KusionStack": { - "message": "Adopting KusionStack", - "description": "The label for category Adopting KusionStack in sidebar user_docs" - }, - "sidebar.user_docs.category.Project Best Practices": { - "message": "Project Best Practices", - "description": "The label for category Project Best Practices in sidebar user_docs" - }, - "sidebar.user_docs.category.Manage Sensitive Information": { - "message": "Manage Sensitive Information", - "description": "The label for category Manage Sensitive Information in sidebar user_docs" - }, - "sidebar.reference.category.Command Line Tools": { - "message": "Command Line Tools", - "description": "The label for category Command Line Tools in sidebar reference" - }, - "sidebar.reference.category.Kusion Tools": { - "message": "Kusion Tools", - "description": "The label for category Kusion Tools in sidebar reference" - }, - "sidebar.reference.category.KCL Tools": { - "message": "KCL Tools", - "description": "The label for category KCL Tools in sidebar reference" - }, - "sidebar.reference.category.OpenAPI Tools": { - "message": "OpenAPI Tools", - "description": "The label for category OpenAPI Tools in sidebar reference" - }, - "sidebar.reference.category.Use Case": { - "message": "Use Case", - "description": "The label for category Use Case in sidebar reference" - }, - "sidebar.reference.category.Kusion Model Library": { - "message": "Kusion Model Library", - "description": "The label for category Kusion Model Library in sidebar reference" - }, - "sidebar.reference.category.Core Model Library": { - "message": "Core Model Library", - "description": "The label for category Core Model Library in sidebar reference" - }, - "sidebar.reference.category.Konfig": { - "message": "Konfig", - "description": "The label for category Konfig in sidebar reference" - }, - "sidebar.develop.category.Build Kusion": { - "message": "Build Kusion", - "description": "The label for category Build Kusion in sidebar develop" - }, - "sidebar.develop.category.Design and Implementation": { - "message": "Design and Implementation", - "description": "The label for category Design and Implementation in sidebar develop" - }, - "sidebar.governance.category.Community": { - "message": "Community", - "description": "The label for category Community in sidebar governance" - }, - "sidebar.governance.category.Release Policy": { - "message": "Release Policy", - "description": 
"The label for category Release Policy in sidebar governance" - }, - "sidebar.governance.category.Contribution Guide": { - "message": "Contribution Guide", - "description": "The label for category Contribution Guide in sidebar governance" - }, - "sidebar.events.category.2022": { - "message": "2022", - "description": "The label for category 2022 in sidebar events" - } -} diff --git a/i18n/en/docusaurus-theme-classic/footer.json b/i18n/en/docusaurus-theme-classic/footer.json index 526ff251..7db819fc 100644 --- a/i18n/en/docusaurus-theme-classic/footer.json +++ b/i18n/en/docusaurus-theme-classic/footer.json @@ -11,13 +11,21 @@ "message": "More", "description": "The title of the footer links column with title=More in the footer" }, - "link.item.label.Introduction": { - "message": "Introduction", - "description": "The label of footer link with label=Introduction linking to /docs/user_docs/intro/kusion-intro" + "link.item.label.Kusion": { + "message": "Kusion", + "description": "The label of footer link with label=Kusion linking to /docs" + }, + "link.item.label.ControllerMesh": { + "message": "ControllerMesh", + "description": "The label of footer link with label=ControllerMesh linking to /ctrlmesh/intro/" + }, + "link.item.label.Karpor": { + "message": "Karpor", + "description": "The label of footer link with label=Karpor linking to /karpor/" }, "link.item.label.Blog": { "message": "Blog", - "description": "The label of footer link with label=Blog linking to /blog" + "description": "The label of footer link with label=Blog linking to https://blog.kusionstack.io/" }, "link.item.label.Github": { "message": "Github", @@ -27,16 +35,16 @@ "message": "Slack", "description": "The label of footer link with label=Slack linking to https://join.slack.com/t/kusionstack/shared_invite/zt-19lqcc3a9-_kTNwagaT5qwBE~my5Lnxg" }, - "link.item.label.FAQ": { - "message": "FAQ", - "description": "The label of footer link with label=FAQ linking to /docs/user_docs/support" - }, - "link.item.label.Changelog": { - "message": "Changelog", - "description": "The label of footer link with label=Changelog linking to /changelog" + "link.item.label.KCL": { + "message": "KCL", + "description": "The label of footer link with label=KCL linking to https://kcl-lang.io" }, "copyright": { - "message": "Copyright © 2022 KusionStack Authors", + "message": "Copyright © 2024 KusionStack Authors. The Linux Foundation has registered trademarks and uses trademarks. 
For a list of trademarks of The Linux Foundation, please see our Trademark Usage page.", "description": "The footer copyright" + }, + "logo.alt": { + "message": "AntGroup Open Source Logo", + "description": "The alt text of footer logo" } } diff --git a/i18n/en/docusaurus-theme-classic/navbar.json b/i18n/en/docusaurus-theme-classic/navbar.json index 5d0a3c75..a31cd796 100644 --- a/i18n/en/docusaurus-theme-classic/navbar.json +++ b/i18n/en/docusaurus-theme-classic/navbar.json @@ -3,28 +3,32 @@ "message": "KusionStack", "description": "The title in the navbar" }, - "item.label.UserDoc": { - "message": "Get Started", - "description": "Navbar item with label UserDoc" + "logo.alt": { + "message": "KusionStack Icon", + "description": "The alt text of navbar logo" }, - "item.label.Reference": { - "message": "Reference", - "description": "Navbar item with label Reference" + "item.label.Kusion": { + "message": "Kusion", + "description": "Navbar item with label Kusion" }, - "item.label.DevDoc": { - "message": "Developing", - "description": "Navbar item with label DevDoc" + "item.label.Operating": { + "message": "Operating", + "description": "Navbar item with label Operating" }, - "item.label.Governance": { - "message": "Governance", - "description": "Navbar item with label Governance" + "item.label.Ctrlmesh": { + "message": "Ctrlmesh", + "description": "Navbar item with label Ctrlmesh" + }, + "item.label.Karpor": { + "message": "Karpor", + "description": "Navbar item with label Karpor" + }, + "item.label.Community": { + "message": "Community", + "description": "Navbar item with label Community" }, "item.label.Blog": { "message": "Blog", "description": "Navbar item with label Blog" - }, - "item.label.Events": { - "message": "Events", - "description": "Navbar item with label Events" } } diff --git a/i18n/zh-CN/changelog-plugin/options.json b/i18n/zh-CN/changelog-plugin/options.json deleted file mode 100644 index 1c5c66c0..00000000 --- a/i18n/zh-CN/changelog-plugin/options.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "title": { - "message": "KusionStack 更新日志", - "description": "The title for the blog used in SEO" - }, - "description": { - "message": "Keep yourself up-to-date about new features in every release", - "description": "The description for the blog used in SEO" - }, - "sidebar.title": { - "message": "更新日志", - "description": "The label for the left sidebar" - } -} diff --git a/i18n/zh-CN/code.json b/i18n/zh-CN/code.json deleted file mode 100644 index b6f7d3fd..00000000 --- a/i18n/zh-CN/code.json +++ /dev/null @@ -1,273 +0,0 @@ -{ - "theme.ErrorPageContent.title": { - "message": "This page crashed.", - "description": "The title of the fallback page when the page crashed" - }, - "theme.ErrorPageContent.tryAgain": { - "message": "Try again", - "description": "The label of the button to try again when the page crashed" - }, - "theme.NotFound.title": { - "message": "Page Not Found", - "description": "The title of the 404 page" - }, - "theme.NotFound.p1": { - "message": "We could not find what you were looking for.", - "description": "The first paragraph of the 404 page" - }, - "theme.NotFound.p2": { - "message": "Please contact the owner of the site that linked you to the original URL and let them know their link is broken.", - "description": "The 2nd paragraph of the 404 page" - }, - "theme.AnnouncementBar.closeButtonAriaLabel": { - "message": "Close", - "description": "The ARIA label for close button of announcement bar" - }, - "theme.BackToTopButton.buttonAriaLabel": { - "message": "Scroll back to 
top", - "description": "The ARIA label for the back to top button" - }, - "theme.blog.archive.title": { - "message": "Archive", - "description": "The page & hero title of the blog archive page" - }, - "theme.blog.archive.description": { - "message": "Archive", - "description": "The page & hero description of the blog archive page" - }, - "theme.blog.paginator.navAriaLabel": { - "message": "Blog list page navigation", - "description": "The ARIA label for the blog pagination" - }, - "theme.blog.paginator.newerEntries": { - "message": "Newer Entries", - "description": "The label used to navigate to the newer blog posts page (previous page)" - }, - "theme.blog.paginator.olderEntries": { - "message": "Older Entries", - "description": "The label used to navigate to the older blog posts page (next page)" - }, - "theme.blog.post.readingTime.plurals": { - "message": "One min read|{readingTime} min read", - "description": "Pluralized label for \"{readingTime} min read\". Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" - }, - "theme.blog.post.readMoreLabel": { - "message": "Read more about {title}", - "description": "The ARIA label for the link to full blog posts from excerpts" - }, - "theme.blog.post.readMore": { - "message": "Read More", - "description": "The label used in blog post item excerpts to link to full blog posts" - }, - "theme.blog.post.paginator.navAriaLabel": { - "message": "Blog post page navigation", - "description": "The ARIA label for the blog posts pagination" - }, - "theme.blog.post.paginator.newerPost": { - "message": "Newer Post", - "description": "The blog post button label to navigate to the newer/previous post" - }, - "theme.blog.post.paginator.olderPost": { - "message": "Older Post", - "description": "The blog post button label to navigate to the older/next post" - }, - "theme.blog.sidebar.navAriaLabel": { - "message": "Blog recent posts navigation", - "description": "The ARIA label for recent posts in the blog sidebar" - }, - "theme.blog.post.plurals": { - "message": "One post|{count} posts", - "description": "Pluralized label for \"{count} posts\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" - }, - "theme.blog.tagTitle": { - "message": "{nPosts} tagged with \"{tagName}\"", - "description": "The title of the page for a blog tag" - }, - "theme.tags.tagsPageLink": { - "message": "View All Tags", - "description": "The label of the link targeting the tag list page" - }, - "theme.CodeBlock.copyButtonAriaLabel": { - "message": "Copy code to clipboard", - "description": "The ARIA label for copy code blocks button" - }, - "theme.CodeBlock.copied": { - "message": "Copied", - "description": "The copied button label on code blocks" - }, - "theme.CodeBlock.copy": { - "message": "Copy", - "description": "The copy button label on code blocks" - }, - "theme.colorToggle.ariaLabel": { - "message": "Switch between dark and light mode (currently {mode})", - "description": "The ARIA label for the navbar color mode toggle" - }, - "theme.colorToggle.ariaLabel.mode.dark": { - "message": "dark mode", - "description": "The name for the dark color mode" - }, - "theme.colorToggle.ariaLabel.mode.light": { - "message": "light mode", - "description": "The name for the light color mode" - }, - "theme.docs.DocCard.categoryDescription": { - "message": "{count} items", - "description": "The default description for a category card in the generated index about how many items this category includes" - }, - "theme.docs.sidebar.expandButtonTitle": { - "message": "Expand sidebar", - "description": "The ARIA label and title attribute for expand button of doc sidebar" - }, - "theme.docs.sidebar.expandButtonAriaLabel": { - "message": "Expand sidebar", - "description": "The ARIA label and title attribute for expand button of doc sidebar" - }, - "theme.docs.paginator.navAriaLabel": { - "message": "Docs pages navigation", - "description": "The ARIA label for the docs pagination" - }, - "theme.docs.paginator.previous": { - "message": "Previous", - "description": "The label used to navigate to the previous doc" - }, - "theme.docs.paginator.next": { - "message": "Next", - "description": "The label used to navigate to the next doc" - }, - "theme.docs.sidebar.collapseButtonTitle": { - "message": "Collapse sidebar", - "description": "The title attribute for collapse button of doc sidebar" - }, - "theme.docs.sidebar.collapseButtonAriaLabel": { - "message": "Collapse sidebar", - "description": "The title attribute for collapse button of doc sidebar" - }, - "theme.DocSidebarItem.toggleCollapsedCategoryAriaLabel": { - "message": "Toggle the collapsible sidebar category '{label}'", - "description": "The ARIA label to toggle the collapsible sidebar category" - }, - "theme.docs.tagDocListPageTitle.nDocsTagged": { - "message": "One doc tagged|{count} docs tagged", - "description": "Pluralized label for \"{count} docs tagged\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" - }, - "theme.docs.tagDocListPageTitle": { - "message": "{nDocsTagged} with \"{tagName}\"", - "description": "The title of the page for a docs tag" - }, - "theme.docs.versionBadge.label": { - "message": "Version: {versionLabel}" - }, - "theme.docs.versions.unreleasedVersionLabel": { - "message": "This is unreleased documentation for {siteTitle} {versionLabel} version.", - "description": "The label used to tell the user that he's browsing an unreleased doc version" - }, - "theme.docs.versions.unmaintainedVersionLabel": { - "message": "This is documentation for {siteTitle} {versionLabel}, which is no longer actively maintained.", - "description": "The label used to tell the user that he's browsing an unmaintained doc version" - }, - "theme.docs.versions.latestVersionSuggestionLabel": { - "message": "For up-to-date documentation, see the {latestVersionLink} ({versionLabel}).", - "description": "The label used to tell the user to check the latest version" - }, - "theme.docs.versions.latestVersionLinkLabel": { - "message": "latest version", - "description": "The label used for the latest version suggestion link label" - }, - "theme.common.editThisPage": { - "message": "Edit this page", - "description": "The link label to edit the current page" - }, - "theme.common.headingLinkTitle": { - "message": "Direct link to heading", - "description": "Title for link to heading" - }, - "theme.lastUpdated.atDate": { - "message": " on {date}", - "description": "The words used to describe on which date a page has been last updated" - }, - "theme.lastUpdated.byUser": { - "message": " by {user}", - "description": "The words used to describe by who the page has been last updated" - }, - "theme.lastUpdated.lastUpdatedAtBy": { - "message": "Last updated{atDate}{byUser}", - "description": "The sentence used to display when a page has been last updated, and by who" - }, - "theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel": { - "message": "← Back to main menu", - "description": "The label of the back button to return to main menu, inside the mobile navbar sidebar secondary menu (notably used to display the docs sidebar)" - }, - "theme.navbar.mobileVersionsDropdown.label": { - "message": "Versions", - "description": "The label for the navbar versions dropdown on mobile view" - }, - "theme.common.skipToMainContent": { - "message": "Skip to main content", - "description": "The skip to content label used for accessibility, allowing to rapidly navigate to main content with keyboard tab/enter navigation" - }, - "theme.TOCCollapsible.toggleButtonLabel": { - "message": "On this page", - "description": "The label used by the button on the collapsible TOC component" - }, - "theme.tags.tagsListLabel": { - "message": "Tags:", - "description": "The label alongside a tag list" - }, - "theme.navbar.mobileLanguageDropdown.label": { - "message": "Languages", - "description": "The label for the mobile language switcher dropdown" - }, - "theme.SearchBar.seeAll": { - "message": "See all {count} results" - }, - "theme.SearchBar.label": { - "message": "Search", - "description": "The ARIA label and placeholder for search button" - }, - "theme.SearchPage.documentsFound.plurals": { - "message": "One document found|{count} documents found", - "description": "Pluralized label for \"{count} documents found\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" - }, - "theme.SearchPage.existingResultsTitle": { - "message": "Search results for \"{query}\"", - "description": "The search page title for non-empty query" - }, - "theme.SearchPage.emptyResultsTitle": { - "message": "Search the documentation", - "description": "The search page title for empty query" - }, - "theme.SearchPage.inputPlaceholder": { - "message": "Type your search here", - "description": "The placeholder for search page input" - }, - "theme.SearchPage.inputLabel": { - "message": "Search", - "description": "The ARIA label for search page input" - }, - "theme.SearchPage.algoliaLabel": { - "message": "Search by Algolia", - "description": "The ARIA label for Algolia mention" - }, - "theme.SearchPage.noResultsText": { - "message": "No results were found", - "description": "The paragraph for empty search result" - }, - "theme.SearchPage.fetchingNewResults": { - "message": "Fetching new results...", - "description": "The paragraph for fetching new search results" - }, - "theme.tags.tagsPageTitle": { - "message": "Tags", - "description": "The title of the tag list page" - }, - "Quick Start - 15min ⏱️": { - "message": "快速开始 - 15分钟 ⏱️" - }, - "homepage.title": { - "message": "KusionStack" - }, - "homepage.tagline": { - "message": "可编程配置技术栈" - } -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-blog/2021-05-18-kusion-intro/index.md b/i18n/zh-CN/docusaurus-plugin-content-blog/2021-05-18-kusion-intro/index.md deleted file mode 100644 index 5549038d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-blog/2021-05-18-kusion-intro/index.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -slug: 2021-kusion-intro -title: 云原生开放运维体系探索实践 -authors: - name: 朵晓东 - title: Kusion 项目负责人 -tags: [kusion] ---- - -*本文是云原生开放协同技术探索与实践一阶段的总结和综述。* - -蚂蚁基础技术在过去3年多以来持续、深入推进全面的云原生化技术演进,我们将在线、离线计算资源装进了一台计算机,将服务体系通过 mesh 的思路和技术手段进行了下沉解耦,可以说比较充分的拥抱了云原生技术,并获取了其带来的技术红利。 - -当完成了资源、服务的云原生化,我们发现在云原生基础能力之上的运维体系与云原生技术开放、共享的思路有较大的距离,在技术体系上也与云原生技术声明式、白盒化的思路相悖,同时由于缺少匹配的技术支撑,历史包袱等问题也成为了云原生运维难以真正代际演进的障碍。今天我要介绍的就是蚂蚁在这样的背景下在云原生运维方向进行的技术探索和实践。 - -## 1. 
规模化云原生运维探索 - -我们先来回顾一下在蚂蚁真实的实践方式和面对的问题。首先,我们来看看蚂蚁践行多年的经典运维中台,这类运维平台一般包括了控制器、业务模型、编排引擎、原子任务及管道,在蚂蚁这样的平台是一系列服务的集合,他们较好的满足了集中式、标准化、低变更频率的应用发布及运维需求。但这种模式在实践中也存在着明显的不足。 - -首先对于非标准应用、应用个性化需求、高成本需求、非紧急需求、技改类需求,往往无法较好的满足。在蚂蚁的实践中,非标运维需求、对核心应用模型及运维模型冲击较大的高成本改造需求、大量基础能力或运维功能的透出需求等长期无法得到较好的满足,需求往往是合理的,是难以获得足够的优先级执行落地。在研发阶段,运维平台长期积累了高复杂度的业务逻辑,修改测试涉及跨系统的长改造链路,同时基础能力的透出、运维能力的产品化依赖前端、服务端研发资源。这些问题使得运维平台研发日渐吃力,特别是在产品 GUI、业务模型、编排引擎等变更热点上,受限于扩展机制能力不足,内部实践中甚至出现过线上不断修改代码、发布服务以满足需求的情况。平台上线后,统一的质保和线上全链路功能验证同样面对较大的压力。对于最终的使用者,命令式按钮背后的黑盒计算透明度低,审计难,结果难预测,同时激情操作、操作界面不熟悉等问题也一直影响着线上的稳定性。这些问题长期存在,我们寄希望于代际的技术演进来解决这些问题。 - -![](/img/blog/2021-05-18-kusion-intro/01.png) - -当云原生基础服务逐渐稳定后,对于自身场景不在运维平台管理范围内的应用,研发同学自发的借助云原生社区工具链解决问题。基于 Kubernetes 生态高度开放、高度可配置的特点,研发者可以自助、灵活、透明的声明式应用运行、运维需求,以应用粒度完成发布、运维操作。 - -用户通过 kustomize 等社区技术缩短了对接基础设施的路径,并通过如 velocity 等文本模板技术部分解决了静态 YAML 文件在较多变量时维度爆炸的问题,解决了默认值设定的问题,同时通过 code review 的方式进行多因子变更及评审。由于 Kubernetes 及其生态提供了面向资源、服务、运维、安全的横向能力,使得这种简单的方式可有很好的普遍性和适用性,通过对不同的 Kubernetes 集群 “播放” 这些数据即可完成对基础设施的变更,本质上是一种声明数据的流转。面向 git 仓库的研发方式和 gitops 流程支持对运维产品研发资源的诉求较低,往往可以比较简单的搭建起来,不强依赖产品研发资源投入。相比经典运维中台,这些好处清晰明确,但从工程视角缺点也非常明显。 - -![](/img/blog/2021-05-18-kusion-intro/02.png) - - -首先 Kubernetes API 的设计较为复杂,仅是 Kubernetes 原生提供的 low level API 就暴露了 500 多种模型,2000 多字段,场景上几乎涵盖了基础设施应用层的方方面面,即使是专业同学也很难理解所有细节。其次这种方式的工程化程度很低,违反 DRY 原则,违反各团队职责能力高内聚低耦合的原则,即使在有一定的工具支持的情况下,在内部的典型案例中一个多应用的 infra 项目仍然维护了多达 5 万多行 YAML,同时由于团队边界造成的多个割裂的平台,用户需在多个平台间切换,每个平台的操作方式各异,加上跳板机黑屏命令,完成一次完整的发布需要 2 天时间。 - -由于低工程化程度的问题,各团队间协同依赖人肉拉群同步,最终 YAML 由多个团队定义的部分组合而成,其中一大部分属于 Kubernetes 及运维平台团队的定义,这些内容需要持续跟踪同步避免腐化,长期维护成本高。 - -## 2. KUSION: 云原生开放协同技术栈 - -以上两种模式各有利弊,优势和问题都比较清晰。那么能不能既要也要呢,能不能在继承经典运维平台优势的情况下,充分利用云原生技术带来的红利,打造一个开放、透明、可协同的运维体系? - -带着这样的问题,我们进行了探索和实践,并创建了基于基础设施代码化思路的云原生可编程技术栈 Kusion。 - -大家都知道 Kubernetes 提供了声明式的 low level API,提倡其上生态能力通过 CRD 扩展的方式定义并提供服务,整个生态遵循统一的 API 规范约束,复用 API 技术和工具。Kubernetes API 规范提倡 low level API 对象松耦合、可复用,以支持 high level API 由 low level API “组合” 而成。Kubernetes 自身提供了利于开源传播的极简方案,并不包括 API 之上的技术和方案。 - -回到云原生技术的本源,我们回看了 Kubernetes 前身 Borg 的应用技术生态。如下图示,在 BorgMaster 之上,Borg 团队研发了 Borg 接入三件套,即 BCL(Borg Configuration Language),Command-line tools,以及相应的 web service。用户可以通过 BCL 声明式编写需求,通过 Command-line tools 将 BCL 文件执行到 Borg 集群,并通过 web GUI 视图查看任务细节。经过大量的调研,我们了解到 Google 内部的运维能力及产品生态、质量技术生态都依赖这三件套构建而成,在内部也进行了多年的迭代演进。 - -![](/img/blog/2021-05-18-kusion-intro/03.png) - - -这给了我们启发,今天我们有了容器技术、服务体系,有了大量用户和差异化的需求,有了一定数量的自动化运维平台,我们希望能通过云原生专用的语言和工具来链接 Kubernetes 生态、各个运维平台以及大量的用户,通过唯一事实定义消除运维平台孤岛,完成云原生基础设施在应用、运维层面的代际演进,达到 “Fusion on Kubernetes” 的目标。 - -带着这样的目标,我们持续地进行做技术探索和实践,目前已经形成了 Kusion 技术栈,并在蚂蚁的生产实践中进行应用。 - -![](/img/blog/2021-05-18-kusion-intro/04.png) - -Kusion 技术栈基于这样的基础能力而工作,包括如下组成部分: - -- 云原生配置策略专用语言 KCL (Kusion Configuration Language) -- KCL 解释器及其 Plugin 扩展机制 -- KCL 研发工具集: Lint, Format, Doc-Gen,IDE Plugin(IDEA, VsCode) -- Kusion Kubernetes 生态工具: OpenAPI-tool, KusionCtl(Srv) -- Konfig 配置代码库,其中包括平台侧及用户侧代码 -- OCMP (Open CloudNative Management Practice) 实践说明书 - -![](/img/blog/2021-05-18-kusion-intro/05.png) - - -Kusion 工作在基础设施之上,作为抽象及管理层的技术支撑服务上层应用。不同角色的用户协同使用 Kubernetes 生态提供的横向能力,通过声明式、意图导向的定义方式使用基础设施,在场景上支持典型的云原生场景,也服务了一些经典运维场景,完成了一阶段的建设工作。目前接入 Kusion 的产品包括 IaC 发布、运维产品 InfraForm、建站产品 SiteBuilder、快恢平台等。通过将 Kusion 集成在自动化系统中,我们尽可能的调和黑盒命令式自动化系统与开放声明式配置系统,使其发挥各自的优势。 - -![](/img/blog/2021-05-18-kusion-intro/06.png) - - -## 3. 
集成 & 落地 - -新的技术体系首先面临着落地的问题,我们先来看看 Kusion 在集成落地方面的思考和做法。 - -从整体思路上,我们从经典运维系统中的变更热点业务层、编排层着手,以 KCL 声明式配置块的方式外置编写对应逻辑,并被控制器自动化集成。 - -这种思路是有迹可循的,我们来看看同行的经验,以雷神山医院的建设现场为例,我们可以看到现场大量的组件是预制品,经过了测试、验证、交付后由现场的塔吊负责组装。这些组件需要良好的品控,需要内置水管、电线等“能力”,否则即使组装也无法有效工作,同时需要给业务侧一定的自定义配置空间,还要易于组装及自动化以提升现场装配效率。实际上我们面对的大规模运维活动与这样的现场有类似之处,现代基建的高效手段非常值得我们学习借鉴。 - -![](/img/blog/2021-05-18-kusion-intro/07.png) - - -对应我们的实际场景,我们基于 KCL 的工作方式需要满足以下要求: - -- **可分工、可协同:**组件制作、验收、交付、使用可以基于角色需要合理分工协同,满足软件供应链的需求 -- **外置、预制的组件:**组件独立于自动化系统存在,预制的组件在交付前需要经过充分的测试验证 -- **内置资源、服务、身份等要素:**组件仅向用户暴露有效的业务信息,同时内置云原生、可信的逻辑 -- **易于业务定义:**组件需要提供一定的自定义配置能力 -- **易于自动化:**支持自动化组装,自动化对组件进行“增删改查” - -接下来,我们来看看基于 Kusion 工作的典型流程,此处有一定的抽象和简化。 - -前文提到 Kubernetes API 提供了基于 OpenAPI 的 low level API 及扩展机制,基于高内聚、低耦合、易复用、易组装的原则设计,以 Resource、Custome Resource 的方式存在。此外,Kubernetes API 提供了大量的命令以操作容器、Pod 等资源。对于 SDN、Mesh,或是其他的能力扩展都是基于这样的整体约束和方式,大都提供了资源定义或命令操作。 - -基于这样的基础,在蚂蚁的实践中我们将整体的工作流程分为 4 个步骤: - -- **代码化:** 对于资源定义,基于 OpenAPI Model/CRD 定义生成 KCL 结构体;对于命令操作,编写对应的声明式 KCL 结构体。这些结构体对应到平台侧原子能力定义。 -- **抽象化:** 平台侧 PaaS 平台同学基于这些原子声明式编写抽象、组装,并定义出面向用户的前端结构体,从功能场景上涵盖了 AppConfiguration, Action / Ops, Locality / Topology, SA / RBAC, Node / Quota 等场景,并提供了简化编写的 Template 集合。以 AppConfiguration 为例,我们提供了SigmaAppConfiguration、SigmaJobConfiguration 分别对应于服务型和任务型应用定义,此外针对 SOFA 应用的特征提供了 SofaAppConfiguration。这些前端结构体作为 Kusion Models 的“接口层”存在,受限于业务进度等原因各场景积累的水位不同,仍需要长期的积累打磨。 -- **配置化:** 应用侧研发或 SRE 同学基于这些前端结构体描述应用需求。用户可以通过结构体声明的方式为应用定义配置基线及不同环境的配置。在大部分情况下,用户仅需要进行结构体声明,即一些 key-value 对。对于有复杂需求的场景,用户可以进行逻辑编写或通过继承结构体的方式组织代码逻辑 -- **自动化:** 当应用侧配置完成后,实际上已经定义好了可用的“组件”,具备了自动化的条件。平台侧控制器可以通过 KCL CLI 或 GPL binding API 完成编译、执行、输出、代码修改、元素查询等自动化集成工作,用户则可以通过 KusionCtl 工具执行 KCL 代码映射执行到 Kubernetes 集群。 - -![](/img/blog/2021-05-18-kusion-intro/08.png) - -通过这样统一的工作流程,我们轻量级地完成了对 Kubernetes 生态大量基础能力的透出,基于原子能力声明式地封装、抽象出面向应用的配置、运维能力,并完成了一定场景的落地应用。Kusion 提供了研发工具协助使用者完成其工作。我们可以对平台侧、用户侧分层协同模式下的实践做进一步的探讨。平台侧同学抽象并定义出前端结构体,例如 SofaAppConfiguration ,其中定义了业务镜像、所需资源、config、secrect、sidecar、LB、DNS、副本数、逻辑资源池、发布策略、是否超卖、是否访问公网等等。 - -前端结构体无法独立工作,实际上存在着与前端结构体对应的后端结构体,后端对前端透明,前-后端结构体分离解耦。后端结构体在运行时将前端结构体产生的数据“翻译”成对应的 low level API ,这种反向依赖的方式依赖于 KCL 语言能力。 - -从工程角度看平台侧同学实际上完成了一次轻量级、声明式的应用级 API 定义。这种前后端分离的设计有诸多好处。首先应用侧使用的前端结构体可以保持简单干净、业务导向、实现细节无关;其次可以通过编译时指向不同的后端文件动态切换到不同的后端结构体实现,以完成平台版本切换、实现切换等目的;最后这样分离的做法可以在统一模式的前提下保证充分的灵活性,例如平台可以通过 kcl base.k prod.k backend.k 多文件编译完成一次包含基线、环境配置、后端结构体的组合编译。事实上,我们可以将所有场景规约为 kcl user_0.k … user_n.k platform_0.k … platform_n.k 的范式,其中 user.k 代表用户侧代码,platform.k 代表平台侧代码。我们从另一个角度来看多团队协同的方式。由各团队自下而上定义平台能力及约束,并完成应用级的配置基线及配置环境特征,完成最后一公里的定义。 - -![](/img/blog/2021-05-18-kusion-intro/09.png) - - -在理清工作流程后,我们来看 KCL 通过 Konfig 大库落地的实践。我们在 Konfig 代码仓库中定义了平台侧及用户侧的代码空间,通过统一配置代码库完成对代码的共享和复用,保证了对整体基础设施代码定义的可见性。在用户侧,通过 project、stack、component(对应蚂蚁内部应用) 三级目录的方式组织代码。以 cloudmesh 为例,在 tnt/middleware/cloudmesh 的 project 目录下含多个 stack,如 dev、prod,每个 stack 中含多个 component。代码在这三个维度得以隔离,并共享上下文。 - -![](/img/blog/2021-05-18-kusion-intro/10.png) - - -在代码质保方面,我们通过单元测试、集成测试等手段保证对平台侧、用户侧代码的质量,我们正在引入代码扫描、配置回放、配置校验、dry-run 等验证手段保证代码变更的可靠性。在研发方面,我们通过主干开发、分支发布的方式保证不同应用并行研发的前提下尽可能不产生代码腐化的情况,并通过 tag 保护稳定分支。 - -![](/img/blog/2021-05-18-kusion-intro/11.png) - - -在 IaC 产品落地场景中,通过标准化的结构体、代码版本化、多环境代码隔离、CI pipeline 等手段管理基础设施描述代码,通过代码变更的静态、动态 diff、模拟、异常提示、风险管控接入保证基础设施变更可控,通过代码 Pull Request 做变更审计及对变更人员的追踪。下图以业务发布场景为例展示了关键步骤,在业务代码通过质保流程并完成镜像构建后,CI 流程控制器通过 KCL API 对 Konfig 仓库中对应 KCL 文件中的 image 字段进行自动更新,并发起 Pull Request,由此触发发布流程。 - -IaC 提供了编译测试、live-diff、dry-run、风险管控接入等验证方式,并支持执行过程的可视化,产品基于 KCL 语言能力及工具建设,尽可能的减少业务定制。整个流程以 Konfig 代码的自动修改为起点,平台方、应用方、SRE 
基于代码协同,通过产品界面进行线上发布,支持分批分步、回滚等运维能力。Konfig 中的代码“组件”可以被多个场景集成使用,例如此处被发布控制器集成的组件还可以被建站控制器集成,控制器只需关注自动化逻辑,无需关心被集成组件的内部细节。以文章开头的典型建站场景为例,在接入 Kusion 后,用户侧配置代码减少到 5.5%,用户面对的 4 个平台通过接入统一代码库而消减,在无其他异常的情况下交付时间从 2 天下降到 2 小时。 - -![](/img/blog/2021-05-18-kusion-intro/12.png) - - -我们再来看更加动态性的大规模快速恢复场景。快恢平台在接到监控告警输入后决策产生异常容器 hostname 列表,并需要对容器进行重启等恢复操作。 - -我们通过 KCL 编写声明式的应用恢复运维代码,其中通过 KCL Plugin 扩展完成对在线 CMDB 的查询,将 hostname 列表转换为多集群 Pod 列表,并声明式定义 Pod 恢复操作。快恢平台执行 KusionCtl run AppRecovery.k 完成跨多集群的 Pod 恢复操作。通过这样的方式,快恢控制器无需理解容器恢复细节、Kubernetes 多集群映射执行细节等,可以更专注于自身异常判断及决策逻辑。 - -![](/img/blog/2021-05-18-kusion-intro/13.png) - - -在项目落地过程中,我们也发现到了不少因为进度等原因造成的平台侧设计问题。例如平台侧操作定义不够规范,应用依赖等共性定义过于分散等问题,这需要我们在后续的落地过程中持续去沉淀提高。开放配置给了用户更大的灵活性和空间,但相比黑盒的方式需要更多的安全性保障。在开放协同推进的同时,可信原生技术部在并行推进云原生可信平台的建设,可信平台通过将身份与 Kubernetes 技术紧密结合提供相比社区方案能力更强的技术支撑。 - -举个例子,通过开放配置我们是不是可以通过 mount 证书的方式使得不可信不安全的服务获得访问目标服务的权限从而获取到关键数据?事实上在没有身份传递及高水位 Pod 安全保障的前提下这是完全可能。通过可信平台对 PSP(Pod Security Policy)、服务验证、服务鉴权等场景的加固,使得我们可以按需增强关键链路的安全策略。相比与社区方案,可信平台定义了更完整的 spiffe 身份标识,并使得身份作用于资源、网络、服务的各个环节,可以说可信是开放的必要前提。同时可信提供的鉴权能力、隔离能力也需要被用户使用,将原子能力封装并在应用配置层面透出依赖于 Kusion 的推进,使得接入 Kusion 的应用可以更简单的使用可信能力。可以说开放协同技术栈与可信平台是能力正交,相辅相成的云原生应用层技术。 - -![](/img/blog/2021-05-18-kusion-intro/14.png) - - -**最后,我们对集成落地做一个小结:** - -平台侧编写 80% 内容,通过面向应用的前端结构体提供规范的配置块,再通过后端结构体定义屏蔽 low level API 资源及操作,最终通过这样的方式描述应用对 workload、编排、运维等方面的需求,重点在于可以定义什么、默认有什么及约束集合,并通过 Konfig 仓库共享复用。平台侧趋向引擎化,专注自动化控制逻辑,由 KCL 代码作为扩展技术外置编写业务逻辑。我们希望面对复杂的运维业务诉求,平台侧控制器逐步演进到低频变更,甚至零变更。 - -应用侧输入 20% 内容,以平台侧前端结构体为界面声明应用侧诉求,重点在于要什么、要做什么,所写即所得。应用侧通过面向多项目、多租户、多环境、多应用的代码工程结构组织代码,通过 Pull Request 发起变更,通过 CICD pipeline 完成白盒化的线上变更。同时,应用侧有对单应用编译、测试、验证、模拟的自由度,在充分验证后交付使用;对多应用可通过 KCL 语言能力按需灵活组合。将大规模的复杂问题拆分缩小到应用粒度,得到充分验证后按需合并,本质上是一种分治思路的实践。针对蚂蚁的实际情况,我们通过 KusionCtl 工具支持研发测试环境的执行及可视化,通过 InfraForm 产品、SiteBuilder 产品等推动线上的部署过程。 - -## 4. 
协同配置问题模型 - -理解了落地思路和场景实践方式,我们将进一步下钻拆解具体的协同场景,同时分析 KCL 语言在配置场景的设计和应用。 - -我们先来看平台侧编写轻量级应用级 API 的一些要点。平台侧同学可以通过单继承的方式扩展结构体,通过 mixin 机制定义结构体内属性的依赖关系及值内容,通过结构体内顺序无关的编写方式完成声明式的结构体定义,此外还支持如逻辑判断、默认值等常用功能。 - -对于声明式与命令式的差异做简单的分析,我们以斐波那契数列为例,可以把一组声明式代码看作一个方程组,方程式的编写顺序本质上不影响求解,而“求解”的过程由 KCL 解释器完成,这样可以避免大量命令式拼装过程及顺序判断代码,对于存在复杂依赖的结构体而言优化尤为明显。 - -对于复杂结构,命令式拼装的写法多出一倍以上的代码量,补丁代码使得结果难以预测,同时需要考虑执行顺序问题,特别是在模块化过程中调整存在依赖的模块顺序非常繁琐且易出错。对于各种配套能力,我们通过 mixin 机制编写,并通过 mixin 声明的方式“混入”到不同的结构体中。 - -![](/img/blog/2021-05-18-kusion-intro/15.png) - - -对于平台侧来说,稳定性保证尤为重要。 - -当配置数据量逐步增大时,良构类型是保证编译时问题发现的有效手段,KCL spec 包括了完备的类型系统设计,我们正在实践静态类型检查和推导,逐步增强类型的完备性。 - -同时 KCL 引入了多种不可变手段,支持用户按需定义结构体内属性的不可变性。通过这两种基础而重要的技术手段使得大量违反编写约束的情况可以在编译时被检查发现。 - -![](/img/blog/2021-05-18-kusion-intro/16.png) - -对于业务向的内容,KCL 支持通过结构体内置的校验规则及单元测试的方式支持。以下图所示代码为例,我们在 AppBase 中定义对 containerPort、services、volumes 的校验规则,同时在 MyProdApp 中定义叠加的环境相关的校验规则。目前校验规则在运行时执行判断,我们正在尝试通过编译时的静态分析对规则进行判断从而发现问题。 - -![](/img/blog/2021-05-18-kusion-intro/17.png) - - -此外对于平台侧来说,升级推进是必须面对的问题。我们首先需要考虑最坏情况,即提供给用户的前端结构体需要做不兼容的调整,按照新增配置项并下线老配置项的思路,我们需要对待下线字段进行禁用,并以合理的方式告知用户。 - -当平台自身出现不兼容更新时问题相似,只是需要平台侧后端结构体进行调整,应用侧用户不直接感知。KCL 针对这类问题提供了字段禁用的功能,使用被禁用字段将在编译阶段通过警告或错误的方式提示,编译错误将 block 编译,从而迫使用户在编译阶段进行修改,避免将问题带入运行时造成影响。 - -对于兼容的平台侧调整,通常在后端结构体修改导入的原子定义文件即可。对于 KCL 解释器自身的变化,我们通过单元测试、集成测试、模糊测试等进行验证,对于 plugin 的变更通过 plugin 自身的测试验证。KCL 解释器及 plugin 的变化通过需要 Konfig 代码库的 UT、IT 进行测试验证,保障已有代码正常工作。在经过测试验证后,发起 Pull Request 通过 code review 评审。 - -![](/img/blog/2021-05-18-kusion-intro/18.png) - - -我们再来简单梳理应用侧协同的场景。假设存在基线配置及生产环境配置,在我们的实践中存在三种典型场景。 - -第一种场景中,基线与生产配置中各定义了同名配置的一部分,由 KCL 自动合并生成最终配置块,这适用于对称配置的场景非常有效,如果出现冲突则会进行冲突报错。 - -第二种场景中,我们希望在生产配置中覆盖基线配置中的一些配置项,类似 Kustomize 的 overlay 覆盖功能,事实上这是大多数熟悉 Kubernetes 使用者的诉求。 - -对于第三种场景,编写者希望配置块全局唯一,不能进行任何形式的修改,若出现同名配置则会在编译阶段报错。在真实的场景中,基线与各环境配置可由研发与 SRE 配合完成,也可以由 Dev 独立完成,Kusion 本身不限制使用者职能。 - -![](/img/blog/2021-05-18-kusion-intro/19.png) - - -通过场景分析我们对 KCL 有了初步的了解,我们以编程语言的理论、技术,云原生应用场景三方面为输入设计 KCL,我们希望通过简单有效的技术手段支撑平台侧、应用侧完成基础设施描述,将问题尽可能暴露在 KCL 编译、测试阶段,以减少线上运行时的问题频次。此外我们提供了便利的语言能力和工具帮助不同的使用群体更高效的完成其工作,并通过工程化的方式组织、共享代码,对接 Kubernetes API 生态。 - -![](/img/blog/2021-05-18-kusion-intro/20.png) - - -## 5. 
抽象模型 - -通过对 Kusion 落地集成、协同编程场景的分析,我们了解到 Kusion 技术的组成场景及使用方式。我们再来看看 Kusion 关键抽象模型。 - -我们先来看 KCL 代码的抽象模型。以下图为例,首先 KCL 代码在编译过程中形成两张有向无环图,分别对应结构体内部声明代码及结构体使用声明。编译过程可以简单分为展开、合并、代换三步。通过这样的计算过程,在编译时完成了大部分代换运算,最终运行时进行少量计算即可得到最终的解。在编译过程中,我们同步进行类型检查和值的检查,他们的区别是类型检查是做泛化,取偏序上确界,值检查是做特化,取偏序下确界。 > - -![](/img/blog/2021-05-18-kusion-intro/21.png) - -对于 KCLVM 的解释器,我们采用了标准的分层解耦的设计方式,由 parser、compiler、VM 三部分组成。我们希望尽可能的在编译时完成工作,例如图的展开、代换,类型的检查、推导等,这样可以保持 VM 部分尽可能简单。后续我们将在 KCLVM compiler 中支持对 WASM 中间表示的编译支持。此外我们通过 plugin 机制支持对 VM 运行时能力的扩展,并考虑了对 LSP Server 的支持以降低 IDE、编辑器支持成本。 - -![](/img/blog/2021-05-18-kusion-intro/22.png) - - -在工程化方面,我们通过 project、stack、component 三级方式组织 KCL 代码。当代码映射到 Kubernetes 集群时,Kusion 支持两种映射方式。 - -第一种方式支持将 stack 映射为 namespace,component 在 namespace 内存在,即 stack 内共享资源配额,component 间通过 SDN 及 Mesh 能力做隔离,这是社区比较常见的一种实践方式。 - -第二种方式将 component 映射为 namespace,stack 通过 label 标识,通过 SA 管理权限,资源配额定义在 component 维度,component 间通过 namespace 的隔离能力做隔离,这是蚂蚁目前线上环境的实践方式。无论如何映射,用户无需感知物理集群对接及切换细节。此外,KCL 代码中资源定义都可以通过唯一的资源 ID 定位,这也是对代码进行“增删改查”的基础。 - -![](/img/blog/2021-05-18-kusion-intro/23.png) - - -为了支持上述的隔离及映射逻辑,我们提供了 KusionCtl 工具帮助用户完成项目结构初始化、Kubernetes 集群映射、执行状态跟踪及展示、Identity 权限集成等常用功能。用户可以通过 KusionCtl 完成研发、测试环境的执行和验证工作。 - -![](/img/blog/2021-05-18-kusion-intro/24.png) - - -对于线上环境,我们更推荐使用基于 Kusion 的运维产品进行变更操作。我们希望通过 KCL 代码开放、透明、声明式、意图导向、分层解耦的定义基础设施,本质上是面向数据及其约束的一种协同工作,变更是一种数据的流动。我们通过前置的预编译、计算、验证,最终将数据交付到各环境的运行时,相比于经典命令式系统中计算逻辑流动的方式,可以最大程度避免复杂命令式计算造成的运行时数据错误,特别是当计算逻辑发生变更时,这种运行时计算错误的结果通常都是一次线上故障。 - -最后,我们来看一种 Kusion 思路的技术架构,我们仍然以控制器、业务层、编排层、任务及管道的分层逻辑来看。自下而上的,由 Kubernetes API Server 收敛了管道并提供了原生资源定义,并通过 CRD & Operator 进行扩展提供稳定的原子任务定义。从我个人的角度看,Operator 如其名约“操作员”,重复着接收订单、执行操作的简单循环,订单未完成则持续操作。 - -Operator 应尽可能保持简单,避免复杂的业务逻辑拆解、控制逻辑、状态机,同时避免因为微小的差异创建新的 Operator 或通过 Operator 做单纯的数据、YAML 转换。Operator 作为收敛基础设施原子能力的存在,应尽量内聚、稳定。在业务层、编排层,我们通过 KCL 代码在 Konfig 仓库中编写,并结合 GitOps 支持应用粒度的变更、编译、测试、验证。控制器层高度引擎化,聚焦自动化逻辑,根据业务场景需要定制控制器及 GUI 产品界面。应用的配置代码“组件”由多个控制器共享复用,例如建站、发布、部分运维都将依赖应用 AppConfiguration 配置代码块。 - -![](/img/blog/2021-05-18-kusion-intro/25.png) - - -## 6. 
总结 & 展望 - -最后,我们对开放协同技术工作做一个总结。 - -我们常说 Kubernetes 是云计算的 Linux/Unix,相比于 Unix 丰富的外围配套生态,Kubernetes 在配套技术能力上还有很长的路径。对比于使用便利的 Shell、Tools,我们还缺少一种符合 Kubernetes 声明式、开放、共享设计理念的语言及工具,Kusion 希望能在这一领域有所帮助,提升基础设施的开放程度及使用效率,易于共享、协同,提升稳定性,简化云原生技术设施的接入方式。 - -![](/img/blog/2021-05-18-kusion-intro/26.png) - - -我们的探索和实践仍然在一个初级阶段,我们希望通过 Kusion 的技术和服务能力在运维、可信、云原生架构演进方面起到积极的作用。 - -我们希望推进真正的基础设施代码化,促成跨团队的 DevOps,成为持续部署与运维的技术支撑。在可信方面,策略及代码、可信集成、标准化的支撑是我们后续的工作重点之一,特别是与策略引擎的结合,是开放可信技术能力的关键步骤。 - -在云原生架构方面,我们将持续推进架构现代化的演进,通过技术手段支持更多上层自动化产品业务的快速创新,同时通过统一的流程、企业级的技术能力支持服务好基础设施应用场景。 - -![](/img/blog/2021-05-18-kusion-intro/27.png) - - -纵观历史,技术总是朝着提高整体社会协作效能演进。 Kusion 带来的云原生开放协同无疑是这条朴素规律再次发挥效力的注脚。 \ No newline at end of file diff --git a/i18n/zh-CN/docusaurus-plugin-content-blog/2021-08-03-kcl-intro/index.md b/i18n/zh-CN/docusaurus-plugin-content-blog/2021-08-03-kcl-intro/index.md deleted file mode 100644 index 889d1f80..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-blog/2021-08-03-kcl-intro/index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -slug: 2021-kcl-intro -title: KCL云原生配置策略语言 -authors: - name: 柴树杉 - title: Kusion团队成员 -tags: [kcl] ---- - -介绍KCL云原生配置语言在蚂蚁的诞生背景、语言特性、实践探索和未来的发展思考。 - -- 简介:https://giac.msup.com.cn/course?id=15307 -- 内容:https://segmentfault.com/a/1190000040455559 -- [PDF下载](https://gw.alipayobjects.com/os/bmw-prod/2cb0c283-5f24-485e-b635-b6efac887eba.pdf) - -[![KCL云原生配置策略语言](/img/blog/2021-08-03-kcl-intro/talk-cover.png)](https://gw.alipayobjects.com/os/bmw-prod/2cb0c283-5f24-485e-b635-b6efac887eba.pdf) diff --git a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-05-28-open-day/index.md b/i18n/zh-CN/docusaurus-plugin-content-blog/2022-05-28-open-day/index.md deleted file mode 100644 index bd554124..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-05-28-open-day/index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -slug: 2022-open-day -title: KusionStack Open Day -authors: - name: Kusion - title: Kusion团队成员 -tags: [KusionStack, Kusion, KCL, KCLVM] ---- - -2022 年 5 月 28 日,KusionStack Open Day 线上线下视频直播正式宣布 KusionStack 一站式可编程配置技术栈(包含 KCL 配置语言、Kusion 引擎、Konfig 配置大库)开源。 - -![](/img/blog/2022-05-28-open-day/01.jpg) - -以上是 KusionStack 杭州团队合影。至此,🎉KusionStack Open Day 圆满结束🎉 - -## 1. 精彩瞬间 - -感谢大家抽空参加,我们一起来回顾下👀,本次活动的精彩瞬间吧~👏 - -![](/img/blog/2022-05-28-open-day/02.jpg) - -蚂蚁集团高级技术专家、Kusion 项目发起人及负责人——朵晓东作为整场活动的主持人,带领大家一起回顾了 Kusion 项目演进历程,并宣布了 KusionStack 正式开源的好消息! - -![](/img/blog/2022-05-28-open-day/03.jpg) - -活动开场,Kusion 项目的两位 Sponsor——蚂蚁集团可信原生技术部负责人何征宇和技术风险部负责人陈亮,对 Kusion 从研发至今的发展回顾和未来展望。两位 Sponsor 在视频中给予了 Kusion 项目极大的认可,并为 KusionStack 的开源献上了祝福。 - -![](/img/blog/2022-05-28-open-day/04.jpg) - - -Kata 创始人、木兰社区 TOC —— 王旭 - -王旭在会上也表达了对 KusionStack 开源的喜悦。他指出:一个项目拿出来开源的好的时机,应当是项目还处在未完全成熟的阶段,这样在开源出来之后,才能够通过开源社区的开发者们,一起推动项目更好的发展,开源不是为了秀肌肉。 - -在最后,他再次为 KusionStack 的开源送上了诚挚的祝福。 - -接下来是各位嘉宾的精彩分享,一起来回顾一下吧~ - -## 2. 《数字化出海业务的 DevOps 探索和实践》 - -![](/img/blog/2022-05-28-open-day/05.png) - -开场演讲的众安国际科技 Engineering 负责人李晓蕾(Sherry Lee),介绍了众安国际 DevOps 在支持数字化出海业务过程遇到难点和对应的解决之道。 - -她从数字化出海对 DevOps 带来的挑战、众安国际 DevOps 遇到的难点和解决方案以及 DevOps 具体实践案例分享三个方面展开了宝贵的经验分享。 - - -- PDF: [数字化出海业务的DevOps探索和实践](https://github.com/KusionStack/community/blob/main/2022/open-day/0-Sherry-Lee-数字化出海业务的DevOps探索和实践.pdf) -- Video(Bilibili): https://www.bilibili.com/video/BV1hr4y1x72a -- Video(YouTube): https://www.youtube.com/watch?v=tYDw__lBcYM - -## 3. 
《蚂蚁集团规模化 DevOps 的代际演进探索》 - -![](/img/blog/2022-05-28-open-day/06.jpg) - - -蚂蚁集团高级技术专家、Kusion 项目发起人及负责人——朵晓东分享了 Kusion 的项目背景和发展进程,同时他宣布 Kusion 正式开源,并分享了开源计划。 - -目前,基于 Kusion 的新一代 PaaS 体系已逐步应用在蚂蚁众多内外部场景,在多种运维场景覆盖、规模化协同效率提升、多主体/站点交付运维、技术创新运维效率提升等多方面体系出显著的优势和价值。 - - -- PDF: [蚂蚁集团规模化 DevOps 代际演进探索](https://github.com/KusionStack/community/blob/main/2022/open-day/1-朵晓东-蚂蚁集团规模化DevOps代际演进探索.pdf) -- Video(Bilibili): https://www.bilibili.com/video/BV1WZ4y147pC -- Video(YouTube): https://www.youtube.com/watch?v=T6NKkb1L1eM - - -## 4. 《KCL 配置策略语言》 - -![](/img/blog/2022-05-28-open-day/07.jpg) - -蚂蚁集团高级研发工程师徐鹏飞介绍了 KCL 的相关核心特性,分享了 KCL 技术栈的思路、架构、关键技术,并展开讲述了 KCL 的在蚂蚁内部多场景的实践经历。 - -KCL 帮助不同角色的用户以简单、可扩展、稳定、高效、分而治之的方式完成开发和运维任务,同时支持与自动化系统集成,实现极致的执行效率。 - -- PDF: [KCL配置策略语言](https://github.com/KusionStack/community/blob/main/2022/open-day/2-徐鹏飞-KCL配置策略语言.pdf) -- Video(Bilibili): https://www.bilibili.com/video/BV1bv4y1w7ke -- Video(YouTube): https://www.youtube.com/watch?v=mUFFri_eRAQ - -## 5. 中场休息 - -![](/img/blog/2022-05-28-open-day/08.jpg) - -中场休息期间,“开源老兵”、Go 语言大佬——柴树杉老师浅谈了他参与 KusionStack 的心路历程和个人收获,并表达了能有更多伙伴参与到 KusionStack 的开源共建的希望。 - -## 6. 《Kusion 模型库和工具链的实践探索和总结》 - -![](/img/blog/2022-05-28-open-day/09.jpg) - -蚂蚁集团高级研发工程师杨英明以实际的案例介绍了如何通过 KCL 抽象 Kusion 模型库,以及如何结合 Kusion 工具链一站式的完成配置代码的编写和生效,同时总结分享了通过这套模式进行实际交付的经验和建议。 - - -- PDF: [Kusion模型库和工具链的实践探索和总结](https://github.com/KusionStack/community/blob/main/2022/open-day/3-杨英明-Kusion模型库和工具链的实践探索和总结.pdf) -- Video(Bilibili): https://www.bilibili.com/video/BV1Vr4y1x7Ty -- Video(YouTube): https://www.youtube.com/watch?v=HDUm_KrunLY - -## 7. 《Kusion 在蚂蚁的规模化实践》 - -![](/img/blog/2022-05-28-open-day/10.jpg) - -最后,蚂蚁集团技术专家史贵明和蚂蚁集团高级运维工程师李治玮共同带来了 Kusion 在蚂蚁的规模化实践的分享。史贵明主要从 PaaS 配置管理的系统架构角度讲述了蚂蚁目前的多云配置管理能力,李治玮则从 SRE 视角下,分享了使用 KCL 解决多种复杂基础设施的交付效率问题和价值。 - - -- PDF: [Kusion在蚂蚁的规模化探索实践](https://github.com/KusionStack/community/blob/main/2022/open-day/4-莫城-半庭-Kusion在蚂蚁的规模化探索实践.pdf) -- Video(Bilibili): https://www.bilibili.com/video/BV1xB4y1X7sv -- Video(YouTube): https://www.youtube.com/watch?v=F9lZEU5GNYE - - -## 8. 未来展望 - -开源并不代表 KusionStack 已经完成,相反我们还有很多需要完善和改进的地方,同时开源社区和文化也对文档和代码提出了更高的要求。这只是一个开始,希望更多从事相关领域的同学能够参与共建,为国内的云原生、DSL 等新兴领域贡献力量。 - -最后,感谢大家的参与🙏 diff --git a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-06-07-sense-of-open-day/index.md b/i18n/zh-CN/docusaurus-plugin-content-blog/2022-06-07-sense-of-open-day/index.md deleted file mode 100644 index efee575b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-06-07-sense-of-open-day/index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -slug: 2022-sense-of-open-day -title: KusionStack 开源有感 -authors: - name: 朵晓东 - title: Kusion 创始人 -tags: [KusionStack, Kusion] ---- - -**历时两年,打破“隔行如隔山”困境** - -本文撰写于 KusionStack 开源前夕,作者有感而发,回顾了团队从 Kusion 项目开发之初到现今成功走上开源之路的艰辛历程。当中既描述了作者及其团队做 Kusion 项目的初心和项目发展至今的成果,也表达了作者自身对团队的由衷感激,字里行间都散发着真情实感。 - -## 1. KusionStack 是什么? - -KusionStack 是开源的可编程云原生协议栈! - -Kusion 一词来源于 fusion(意为融合),希望通过一站式的技术栈融合运维体系的多个角色,提升运维基础设施的开放性、扩展性,从整体上降本增效。KusionStack 通过定义云原生可编程接入层,提供包括配置语言 KCL、模型界面、自动化工具、最佳实践在内的一整套解决方案,连通云原生基础设施与业务应用,连接定义和使用基础设施的各个团队,串联应用生命周期的研发、测试、集成、发布各个阶段,服务于云原生自动化系统建设,加速云原生落地。 - -![](/img/blog/2022-06-07-sense-of-open-day/1.jpg) - -## 2. 
为了一个理想的运维体系 - -2019 年秋,MOSN 的工作已持续了近两年,期间我们逐步完成了在支付宝核心链路的形态验证。整个过程中除了 MOSN 本身面对的种种技术挑战和困难,所谓的云原生技术红利,实际上也已经掣肘于运维系统固化所造成的效率制约。 - -有一天主管找我吃饭(下套),期间向我描述了他理想中的运维体系: - -他希望 SRE 能通过一种专用语言来编写需求,通过写代码来定义基础设施的状态,而不是花费极大的精力在检查、发现、修复的循环上。基础设施团队则通过提供开放的可编程语言和工具支撑不同诉求的 SRE 团队,达到更高的整体 ROI。 - -我立刻意识到这和 Hashicorp 的 Terraform 神之相似(后来 Hashicorp 在 2021 年底上市,以超过 150 亿美元的市值成为迄今为止市值最高的一次开源 IPO)。另一方面,不同于 IaaS 交付场景,蚂蚁面对着大量更规模化、复杂度更高的云原生 PaaS 场景,又让我想到了 Google 内部运用专用语言、工具等技术开放 [Borg](https://pdos.csail.mit.edu/6.824/papers/borg.pdf) 和相关的 [运维能力的实践](https://sre.google/workbook/configuration-specifics),当时感觉这是 [一个既有意思又有挑战的事](https://queue.acm.org/detail.cfm?id=2898444)。 - -饭桌上我们聊了一些思路以及一些还不太确定的挑战,他问我想不想搞一个试试,搞不成也没关系。当时没想太多,饭没吃完就答应了。 - -![](/img/blog/2022-06-07-sense-of-open-day/2.jpg) - -## 3. 漫长的学习、探索与实践 - -隔行如隔山。 - -没有过语言设计研发的经验,也没有过开放自动化系统设计的经验,项目开展之初,我们就陷入了举步维艰的困境。 - -经历了一段漫长时间的学习、摸索和实践的反复循环之后,项目依旧没有大的起色,更困难的是我们不但要面对蚂蚁内部复杂又耦合的场景和问题,还要经受「这种高度工程化的方式在蚂蚁是否有生存土壤」的质疑。 - -屋漏偏逢连夜雨,期间又令人惋惜且无奈的经历了一些人事变化,同时由于种种原因,项目一度陷入了各种困境。整个 2020 年,我们在未知、纠结、无奈中度过…… - -感谢瓴熙、庭坚和我的主管,感谢你们当时没有放弃这个项目,依然与我一同坚守。 - -![](/img/blog/2022-06-07-sense-of-open-day/3.jpg) - -## 4. 痛并快乐的孵化之旅 - -通过持续地布道、交流和沟通,我们逐步在基础设施技术团队和 SRE 团队找到了更多有共识的朋友。 - -同时在技术上,我们亦脱离了迷茫,真正意义上地启动了 Kusion 项目,也成功地从 PoC 过渡到了 MVP 的阶段。 - -最终,我们以“非标”应用为切入点,开始了痛并快乐着的孵化之旅。 - -感谢零执、青河、子波、李丰、毋涯、向野、达远……在这里无法一一列举,感谢你们的坚持让这个想法逐步成为现实。 - -![](/img/blog/2022-06-07-sense-of-open-day/4.jpg) - -## 5. 突破与进展 - -略过中间的种种探索和实践,回顾这段历程,在这一年多的时间里我们结合了编译技术、运维及平台技术,成功建立了一个基于 Kusion 可编程技术栈的运维体系。 - -在业务场景上,项目覆盖了从 IaaS 到 SaaS 的大量运维场景,截至目前共接入了 800+ 应用,覆盖 9 个 BG,21 个 BU,其中典型案例交付运维提效 90% 以上,这也是蚂蚁内部第一次将大量异构应用纳入到一整套运维技术栈。 - -在蚂蚁我们基于云原生容器和微服务技术深入探索了 DevOps、CICD 实践,完善了蚂蚁的云原生技术体系,逐步释放了云原生效率红利,同时形成了一个近 300 人的虚拟运维研发团队。 - -不同职能不同团队的参与者凝聚在一起解决各自所面对的问题,贡献了 30K+ commit 和 350K+ 行代码,有一些参与者自发成为 Kusion 的研发者 。我认为这些工程师文化理念和领域知识的积累带来了远超运维业务本身的价值。 - -![](/img/blog/2022-06-07-sense-of-open-day/5.png) - -此外,Kusion 也成为了可编程基线产品、云原生运维产品、多云交付产品等新一代运维产品的基础技术,成为蚂蚁运维体系架构升级的一部分。 - -不忘初心,我们希望通过技术手段促进与运维参与方的合作关系的合理化、基于开放技术栈的自动化,以及运维数据与知识的沉淀积累,以达到整体协作运维效率的不断提升。 - -同时,因蚂蚁内部运维场景较多且链路复杂,每个环节都需要最懂运维业务的 SRE 密切参与,与平台、应用研发协同工作,最终各环节联合在一起形成了一套完整的运维体系,在这样的思路下开放技术也会越来越重要。 - -平台研发、SRE、应用研发等多种角色协同编写的代码是一种数据的沉淀,亦是一种业务知识的沉淀,基于这些数据和知识,未来会有更多的可能性。 - -## 6. 走上开源之路 - -在历经了一段内部探索之后,我们希望把 KusionStack 开源到技术社区。因为我们意识到自身面对的问题,其他公司、团队其实也同样正在面对。借助开源这件事,我们希望团队的这些工作成果能对更多人有所帮助。 - -当然,也受限于自身能力以及精力和资源的投入,我们希望能有更多朋友参与进来,与我们共同去完善 KusionStack,不论你是工作在云原生、运维自动化、编程语言或者是编译器中的哪一个领域,我们都非常期待和欢迎你的加入。 - -## 7. 期待与你共成长 - -这段经历对我来说异常宝贵,不仅仅是在于自身再一次在新的技术领域和蚂蚁的技术升级方面尝试了新的探索并实现了突破,更宝贵的是,自己还拥有了一段与一群人均 95 后的小伙伴一起将想法落地实现的奇幻历程。 - -在未来,Kusion 的朋友圈不再局限于蚂蚁内部,面向开源,我们期待着能有更多的社区朋友在 KusionStack 与我们共同成长! 
- -## 参考链接 - -- [Large-scale cluster management at Google with Borg (PDF)](https://pdos.csail.mit.edu/6.824/papers/borg.pdf) -- [Borg, Omega, and Kubernetes (PDF)](https://queue.acm.org/detail.cfm?id=2898444) -- [Configuration Specifics](https://sre.google/workbook/configuration-specifics) diff --git a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-09-15-declarative-config-overview/index.md b/i18n/zh-CN/docusaurus-plugin-content-blog/2022-09-15-declarative-config-overview/index.md deleted file mode 100644 index 6c0a72b1..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-09-15-declarative-config-overview/index.md +++ /dev/null @@ -1,761 +0,0 @@ ---- -slug: 2022-declarative-config-overview -title: The Landscape of Declarative Configuration -authors: - name: 徐鹏飞 - title: KusionStack 团队成员 -tags: [KusionStack, Kusion, KCLVM, Configuration] ---- - -## 零、前言 - -文本仅用于澄清声明式配置技术概述,[KCL](https://github.com/KusionStack/KCLVM) 概念以及核心设计,以及与其他配置语言的对比,更多 [KusionStack](https://kusionstack.io/) 的概念、背景与设计、以及与其他开源技术栈的区别将不会在本文中讨论。 - -## 一、声明式配置概述 - -### 1.1 配置的重要性 - -- 软件不是一成不变的,每天有成千上万的配置更新,并且配置本身也在逐渐演进,对规模化效率有较高的诉求 - - **配置更新越来越频繁**:而配置提供了一种改变系统功能的低开销方式,不断发展的业务需求、基础设施要求和其他因素意味着系统需要不断变化。 - - **配置规模越来越大**:一份配置往往要分发到不同的云站点、不同的租户、不同的环境等。 - - **配置场景广泛**:应用配置、数据库配置、网络配置、监控配置等。 - - **配置格式繁多**:JSON, YAML, XML, TOML, 各种配置模版如 Java Velicity, Go Template 等。 -- 配置的稳定性至关重要,系统宕机或出错的一个最主要原因是有大量工程师进行频繁的实时配置更新,表 1 示出了几个由于配置导致的系统出错事件。 - -| 时间 | 事件 | -| --- | --- | -| 2021 年 7 月 | 中国 Bilibili 公司由于 SLB Lua 配置计算出错陷入死循环导致网站宕机 | -| 2021 年 10 月 | 韩国 KT 公司由于路由配置错误导致在全国范围内遭受重大网络中断 | - -表 1 配置导致的系统出错事件 - -### 1.2 声明式配置分类 - -云原生时代带来了如雨后春笋般的技术发展,出现了大量面向终态的声明式配置实践,如图 1 所示,声明式配置一般可分为如下几种方式。 -![](/img/blog/2022-09-15-declarative-config-overview/01-declarative-config.png) -图 1 声明式配置方式分类 - -#### 1.2.1 结构化 (Structured) 的 KV - -- 能力 - - 结构化 kv 满足最小数据化声明需求(int, string, list, dict 等) - - 随着云原生技术快速发展应用,声明式 API 满足 X as Data 发展诉求 - - 面向机器可读可写,面向人类可读 -- 优势 - - 语法简单,易于编写和阅读 - - 多语言 API 丰富 - - 有各种 Path 工具方便数据查询,如 XPath, JsonPath 等 -- 痛点 - - 冗余信息多:当配置规模较大时,维护和阅读配置很困难,因为重要的配置信息被淹没在了大量不相关的重复细节中 - - 功能性不足 - - 约束校验能力 - - 复杂逻辑编写能力 - - 测试、调试能力 - - 不易抽象和复用 - - Kustomize 的 Patch 比较定制,基本是通过固定几种 Patch Merge 策略 -- 代表技术 - - JSON/YAML:非常方便阅读,以及自动化处理,不同的语言均具有丰富的 API 支持。 - - [Kustomize](https://kustomize.io/):提供了一种无需**模板**和 **DSL** 即可自定义 Kubernetes 资源基础配置和差异化配置的解决方案,本身不解决约束的问题,需要配合大量的额外工具进行约束检查如 [Kube-linter](https://github.com/stackrox/kube-linter)、[Checkov](https://github.com/bridgecrewio/checkov) 等检查工具,图 2 示出了 Kustomize 的典型工作方式。 - -![](/img/blog/2022-09-15-declarative-config-overview/02-kustomize.png) -图 2 Kustomize 典型工作方式 - -#### 1.2.3 模版化 (Templated) 的 KV - -- 能力 - - 赋予静态配置数据动态参数的能力,一份模版+动态参数输出不同的静态配置数据 -- 优势 - - 简单的配置逻辑,循环支持 - - 支持外部动态参数输入模版 -- 痛点 - - 容易落入所有配置参数都是模版参数的陷阱 - - 当配置规模变大时,开发者和工具都难以维护和分析它们 -- 代表技术 - - [Helm](https://helm.sh/):Kubernetes 资源的包管理工具,通过配置模版管理 Kubernetes 资源配置。图 3 示出了一个 Helm Jekins Package ConfigMap 配置模版,可以看出这些模版本身都十分短小,可以书写简单的逻辑,适合 Kubernetes 基础组件固定的一系列资源配置通过包管理+额外的配置参数进行安装。相比于单纯的模版化的 KV,Helm 一定程度上提供了模版存储/引用和语义化版本管理的能力相比于 Kustomize 更适合管理外部 Charts, 但是在多环境、多租户的配置管理上不太擅长。 - -![](/img/blog/2022-09-15-declarative-config-overview/03-helm.png) -图 3 Helm Jekins Package ConfigMap 配置模版 - -- 其他各种配置模版:Java Velocity, Go Template 等文本模板引擎非常适合 HTML 编写模板。但是在配置场景中使用时,存在所有配置字段即模版参数的风险,开发者和工具都难以维护和分析它们。 - -#### 1.2.3 代码化 (Programmable) 的 KV - -- 概念 - - Configuration as Code (CaC), 使用代码产生配置,就像工程师们只需要写高级 GPL 代码,而不是手工编写容易出错而且难以理解的服务器二进制代码一样。 - - 配置变更同代码变更同样严肃地对待,同样可以执行单元测试、集成测试等 - - 代码模块化和重用是维护配置代码比手动编辑 JSON/YAML 等配置文件更容易的一个关键原因 -- 能力 - - 
必要的编程语言元素(变量定义、逻辑判断、循环、断言等) - - 必要的模板化能力(支持定义数据模版,并用模版得到新的配置数据) - - 代码模块化能力:结构 + 包管理 - - 面向人类可读可写,面向机器部分可读可写 -- 优势 - - 必要的编程能力 - - 代码模块化与抽象 - - 可以抽象配置模版+并使用配置覆盖 -- 痛点 - - 类型检查不足 - - 运行时错误 - - 约束能力不足 -- 代表技术: - - [GCL](https://github.com/rix0rrr/gcl):一种 Python 实现的声明式配置编程语言,提供了必要的语言能力支持模版抽象,但编译器本身是 Python 编写,且语言本身是解释执行,对于大的模版实例 (比如 K8s 模型) 性能较差。 - - [HCL](https://github.com/hashicorp/hcl):一种 Go 实现结构化配置语言,原生语法受到 libucl、nginx 配置等的启发,用于创建对人类和机器都友好的结构化配置语言,主要针对 devops 工具、服务器配置以及 Terraform 中定义资源配置等。 - - [Jsonnet](https://github.com/google/jsonnet):一种 C++ 实现的数据模板语言,适用于应用程序和工具开发人员,可以生成配置数据并且无副作用组织、简化、统一管理庞大的配置。 - -#### 1.2.4 类型化 (Typed) 的 KV - -- 能力 - - 基于代码化 KV,类型化 KV 多了类型检查和约束的能力 -- 优势 - - 配置合并完全幂等,天然防止配置冲突 - - 丰富的配置约束语法用于编写约束 - - 将类型和值约束编写抽象为同一种形式,编写简单 - - 配置顺序无关 -- 痛点 - - 图合并和幂等合并等概念复杂,用户理解成本较高 - - 类型和值混合定义提高抽象程度的同时提升了用户的理解成本,并且所有约束在运行时进行检查,大规模配置代码下有性能瓶颈 - - 对于想要配置覆盖、修改的多租户、多环境场景难以实现 - - 对于带条件的约束场景,定义和校验混合定义编写用户界面不友好 -- 代表技术: - - [CUE](https://github.com/cue-lang/cue):CUE 解决的核心问题是“类型检查”,主要应用于配置约束校验场景及简单的云原生配置场景 - -#### 1.2.5 模型化 (Structural) 的 KV - -- 优势 - - 以高级语言建模能力为核心描述 - - 模型复用 - - 不可变性 - - 约束校验 - - 引入可分块、可扩展的 KV 配置块编写方式 - - 类高级编程语言的编写、测试方式 - - 语言内置的强校验、强约束支持 - - 面向人类可读可写,面向机器部分可读可写 -- 不足 - - 扩展新模型及生态构建需要一定的研发成本 -- 代表技术:[KCL](https://github.com/KusionStack/KCLVM):一种 Rust 实现的声明式配置策略编程语言,把运维类研发统一为一种声明式的代码编写,可以针对差异化应用交付场景抽象出用户模型并添加相应的约束能力,期望借助可编程 devops 解决规模化运维场景中的配置策略编写的效率和可扩展性等问题。图 4 示出了一个 KCL 编写应用交付配置代码的典型场景 - -![](/img/blog/2022-09-15-declarative-config-overview/04-kcl-app-code.png) -图 4 使用 KCL 编写应用交付配置代码 - -### 1.3 不同声明式配置方式的选择标准与最佳实践 - -- 配置的规模:对于小规模的配置场景,完全可以使用 YAML/JSON 等配置,比如应用自身的简单配置,CI/CD 的配置。此外对于小规模配置场景存在的多环境、多租户等需求可以借助 Kustomize 的 overlay 能力实现简单配置的合并覆盖等操作。 -- 模型抽象与约束的必要性:对于较大规模的配置场景特别是对多租户、多环境等有配置模型和运维特性研发和沉淀迫切需求的,可以使用代码化、类型化和模型化的 KV 方式。 - -此外,从不同声明式配置方式的使用场景出发 - -- 如果需要编写结构化的静态的 K-V,或使用 Kubernetes 原生的技术工具,建议选择 YAML -- 如果希望引入编程语言便利性以消除文本(如 YAML、JSON) 模板,有良好的可读性,或者已是 [Terraform](https://www.terraform.io) 的用户,建议选择 HCL -- 如果希望引入类型功能提升稳定性,维护可扩展的配置文件,建议选择 CUE 之类的数据约束语言 -- 如果希望以现代语言方式编写复杂类型和建模,维护可扩展的配置文件,原生的纯函数和策略,和生产级的性能和自动化,建议选择 KCL - -不同于社区中的其他同类型领域语言,KCL 是一种面向应用研发人员并采纳了现代语言设计和技术的静态强类型编译语言 - -> 注意,本文将不会讨论通用语言编写配置的情况,通用语言一般是 Overkill 的,即远远超过了需要解决的问题,通用语言存在各式各样的安全问题,比如能力边界问题 (启动本地线程、访问 IO, 网络,代码死循环等不安全隐患),比如像音乐领域就有专门的音符去表示音乐,方便学习与交流,不是一般文字语言可以表述清楚的。 -> -> 此外,通用语言因为本身就样式繁多,存在统一维护、管理和自动化的成本,通用语言一般用来编写客户端运行时,是服务端运行时的一个延续,不适合编写与运行时无关的配置,最终被编译为二进制从进程启动,稳定性和扩展性不好控制,而配置语言往往编写的是数据,再搭配以简单的逻辑,描述的是期望的最终结果,然后由编译器或者引擎来消费这个期望结果。 - -## 二、KCL 的核心设计与应用场景 - -Kusion 配置语言(KCL)是一个开源的基于约束的记录及函数语言。KCL 通过成熟的编程语言技术和实践来改进对大量繁杂配置的编写,致力于构建围绕配置的更好的模块化、扩展性和稳定性,更简单的逻辑编写,以及更快的自动化集成和良好的生态延展性。 - -KCL 的核心特性是其**建模**和**约束**能力,KCL 核心功能基本围绕 KCL 这个两个核心特性展开,此外 KCL 遵循以用户为中心的配置理念而设计其核心特性,可以从两个方面理解: - -- **以领域模型为中心的配置视图**:借助 KCL 语言丰富的特性及 [KCL OpenAPI](https://kusionstack.io/docs/reference/cli/openapi/quick-start) 等工具,可以将社区中广泛的、设计良好的模型直接集成到 KCL 中(比如 K8s 资源模型),用户也可以根据自己的业务场景设计、实现自己的 KCL 模型 (库) ,形成一整套领域模型架构交由其他配置终端用户使用。 -- **以终端用户为中心的配置视图**:借助 KCL 的代码封装、抽象和复用能力,可以对模型架构进行进一步抽象和简化(比如将 K8s 资源模型抽象为以应用为核心的 Server 模型),做到**最小化终端用户配置输入**,简化用户的配置界面,方便手动或者使用自动化 API 对其进行修改。 - -不论是以何为中心的配置视图,对于代码而言(包括配置代码)都存在对配置数据约束的需求,比如类型约束、配置字段必选/可选约束、范围约束、不可变性约束等,这也是 KCL 致力于解决的核心问题之一。综上,KCL 是一个开源的基于约束和声明的函数式语言,KCL 主要包含如图 5 所示的核心特性: -![](/img/blog/2022-09-15-declarative-config-overview/05-kcl-core-feature.png) -图 5 KCL 核心特性 - -- **简单易用**:源于 Python、Golang 等高级语言,采纳函数式编程语言特性,低副作用 -- **设计良好**:独立的 Spec 驱动的语法、语义、运行时和系统库设计 -- **快速建模**:以 [Schema](https://kusionstack.io/docs/reference/lang/lang/tour#schema) 为中心的配置类型及模块化抽象 -- **功能完备**:基于 
[Config](https://kusionstack.io/docs/reference/lang/lang/codelab/simple)、[Schema](https://kusionstack.io/docs/reference/lang/lang/tour/#schema)、[Lambda](https://kusionstack.io/docs/reference/lang/lang/tour/#function)、[Rule](https://kusionstack.io/docs/reference/lang/lang/tour/#rule) 的配置及其模型、逻辑和策略编写 -- **可靠稳定**:依赖[静态类型系统](https://kusionstack.io/docs/reference/lang/lang/tour/#type-system)、[约束](https://kusionstack.io/docs/reference/lang/lang/tour/#validation)和[自定义规则](https://kusionstack.io/docs/reference/lang/lang/tour#rule)的配置稳定性 -- **强可扩展**:通过独立配置块[自动合并机制](https://kusionstack.io/docs/reference/lang/lang/tour/#-operators-1)保证配置编写的高可扩展性 -- **易自动化**:[CRUD APIs](https://kusionstack.io/docs/reference/lang/lang/tour/#kcl-cli-variable-override),[多语言 SDK](https://kusionstack.io/docs/reference/lang/xlang-api/overview),[语言插件](https://github.com/KusionStack/kcl-plugin) 构成的梯度自动化方案 -- **极致性能**:使用 Rust & C,[LLVM](https://llvm.org/) 实现,支持编译到本地代码和 [WASM](https://webassembly.org/) 的高性能编译时和运行时 -- **API 亲和**:原生支持 [OpenAPI](https://github.com/KusionStack/kcl-openapi)、 Kubernetes CRD, Kubernetes YAML 等 API 生态规范 -- **开发友好**:[语言工具](https://kusionstack.io/docs/reference/cli/kcl/) (Format,Lint,Test,Vet,Doc 等)、 [IDE 插件](https://github.com/KusionStack/vscode-kcl) 构建良好的研发体验 -- **安全可控**:面向领域,不原生提供线程、IO 等系统级功能,低噪音,低安全风险,易维护,易治理 -- **生产可用**:广泛应用在蚂蚁集团平台工程及自动化的生产环境实践中 - -![](/img/blog/2022-09-15-declarative-config-overview/06-kcl-code-design.png) -图 6 KCL 语言核心设计 - -更多语言设计和能力详见 [KCL 文档](https://kusionstack.io/docs/reference/lang/lang/tour),尽管 KCL 不是通用语言,但它有相应的应用场景,如图 6 所示,研发者可以通过 KCL 编写**配置(config)**、**模型(schema)**、**函数(lambda)**及**规则(rule)**,其中 Config 用于定义数据,Schema 用于对数据的模型定义进行描述,Rule 用于对数据进行校验,并且 Schema 和 Rule 还可以组合使用用于完整描述数据的模型及其约束,此外还可以使用 KCL 中的 lambda 纯函数进行数据代码组织,将常用代码封装起来,在需要使用时可以直接调用。 - -对于使用场景而言,KCL 的可以进行结构化 KV 数据验证、复杂配置模型定义与抽象、强约束校验避免配置错误、分块编写及配置合并能力、自动化集成和工程扩展等能力,下面针对这些功能和使用场景进行阐述。 - -### 2.1 结构化 KV 数据验证 - -如图 7 所示,KCL 支持对 JSON/YAML 数据进行格式校验。作为一种配置语言,KCL 在验证方面几乎涵盖了 OpenAPI 校验的所有功能。在 KCL 中可以通过一个结构定义来约束配置数据,同时支持通过 check 块自定义约束规则,在 schema 中书写校验表达式对 schema 定义的属性进行校验和约束。通过 check 表达式可以非常清晰简单地校验输入的 JSON/YAML 是否满足相应的 schema 结构定义与 check 约束。 -![](/img/blog/2022-09-15-declarative-config-overview/07-kcl-validation.png) -图 7 KCL 中结构化 KV 校验方式 - -基于此,KCL 提供了相应的[校验工具](https://kusionstack.io/docs/reference/cli/kcl/vet)直接对 JSON/YAML 数据进行校验。此外,通过 KCL schema 的 check 表达式可以非常清晰简单地校验输入的 JSON 是否满足相应的 schema 结构定义与 check 约束。此外,基于此能力可以构建如图 8 所示的 KV 校验可视化产品。 -![](/img/blog/2022-09-15-declarative-config-overview/08-kcl-validation-ui.png) -图 8 基于 KCL 结构化 KV 校验能力构建的可视化产品界面 - -- 更多参考文档:KCL Vet 工具:[https://kusionstack.io/docs/reference/cli/kcl/vet](https://kusionstack.io/docs/reference/cli/kcl/vet) - -### 2.2 复杂配置模型定义与抽象 - -如图 9 所示,借助 KCL 语言丰富的特性及 [KCL OpenAPI](https://kusionstack.io/docs/reference/cli/openapi/quick-start) 等工具,可以将社区中广泛的、设计良好的模型直接集成到 KCL 中(比如 K8s 资源模型 CRD),用户也可以根据自己的业务场景设计、实现自己的 KCL 模型 (库) ,形成一整套领域模型架构交由其他配置终端用户使用。 -![](/img/blog/2022-09-15-declarative-config-overview/09-kcl-modeling.png) -图 9 KCL 复杂配置建模的一般方式 - -基于此,可以像图 10 示出的那样用一个大的 [Konfig 仓库](https://github.com/KusionStack/konfig) 管理全部的 KCL 配置代码,将业务配置代码 (应用代码)、基础配置代码 (核心模型+底层模型)在一个大库中,方便代码间的版本依赖管理,自动化系统处理也比较简单,定位唯一代码库的目录及文件即可,代码互通,统一管理,便于查找、修改、维护,可以使用统一的 CI/CD 流程进行配置管理(此外,大库模式也是 Google 等头部互联网公司内部实践的模式)。 -![](/img/blog/2022-09-15-declarative-config-overview/10-kcl-konfig.png) -图 10 使用 KCL 的语言能力集成领域模型并抽象用户模型并使用 - -- 更多参考文档 - - KCL Schema:[https://kusionstack.io/docs/reference/lang/lang/tour#schema](https://kusionstack.io/docs/reference/lang/lang/tour#schema) - - KCL OpenAPI 
规范:[https://kusionstack.io/docs/reference/cli/openapi/spec](https://kusionstack.io/docs/reference/cli/openapi/spec) - - KCL Konfig 配置大库概览:[https://kusionstack.io/docs/reference/konfig/overview](https://kusionstack.io/docs/reference/konfig/overview) - -### 2.3 强约束校验避免配置错误 - -如图 11 所示,在 KCL 中可以通过丰富的强约束校验手段避免配置错误: -![](/img/blog/2022-09-15-declarative-config-overview/11-kcl-constraint.png) -图 11 KCL 强约束校验手段 - -- KCL 语言的类型系统被设计为静态的,类型和值定义分离,支持编译时类型推导和类型检查,静态类型不仅仅可以提前在编译时分析大部分的类型错误,还可以降低后端运行时的动态类型检查的性能损耗。此外,KCL Schema 结构的属性强制为非空,可以有效避免配置遗漏。 -- 当需要导出的 KCL 配置被声明之后,它们的类型和值均不能发生变化,这样的静态特性保证了配置不会被随意篡改。 -- KCL 支持通过结构体内置的校验规则进一步保障稳定性。比如对于如图 12 所示的 KCL 代码,,在 `App` 中定义对 `containerPort`、`services`、`volumes` 的校验规则,目前校验规则在运行时执行判断,后续 KCL 会尝试通过编译时的静态分析对规则进行判断从而发现问题。 - -![](/img/blog/2022-09-15-declarative-config-overview/12-kcl-app-schema.png) -图 12 带规则约束的 KCL 代码校验 - -### 2.4 分块编写及配置合并 - -KCL 提供了配置分块编写及自动合并配置的能力,并且支持幂等合并、补丁合并和唯一配置合并等策略。幂等合并中的多份配置需要满足交换律,并且需要开发人员手动处理基础配置和不同环境配置冲突。 补丁合并作为一个覆盖功能,包括覆盖、删除和添加。唯一的配置要求配置块是全局唯一的并且未修改或以任何形式重新定义。 KCL 通过多种合并策略简化了用户侧的协同开发,减少了配置之间的耦合。 - -如图 13 所示,对于存在基线配置、多环境和多租户的应用配置场景,有一个基本配置 base.k。 开发和 SRE 分别维护生产和开发环境的配置 base.k 和 prod.k,他们的配置互不影响,由 KCL 编译器合并成一个 prod 环境的等效配置代码。 -![](/img/blog/2022-09-15-declarative-config-overview/13-kcl-isolated-config.png) -图 13 多环境场景配置分块编写实例 - -### 2.5 自动化集成 - -在 KCL 中提供了很多自动化相关的能力,主要包括工具和多语言 API。 通过 `package_identifier : key_identifier`的模式支持对任意配置键值的索引,从而完成对任意键值的增删改查。比如图 14 所示修改某个应用配置的镜像内容,可以直接执行如下指令修改镜像,修改前后的 diff 如下图所示。 - -![](/img/blog/2022-09-15-declarative-config-overview/14-kcl-image-update.png) -图 14 使用 KCL CLI/API 自动修改应用配置镜像 - -此外,可以基于 KCL 的自动化能力实现如图 15 所示的一镜交付及自动化运维能力并集成到 CI/CD 当中。 -![](/img/blog/2022-09-15-declarative-config-overview/15-kcl-automation.png) -图 15 典型 KCL 自动化集成链路 - -## 三、KCL 与其他声明式配置的对比 - -### 3.1 vs. JSON/YAML - -YAML/JSON 配置等适合小规模的配置场景,对于大规模且需要频繁修改的云原生配置场景,比较适合 KCL 比较适合,其中涉及到主要差异是配置数据抽象与展开的差异: - -- 对于 JSON/YAML 等静态配置数据展开的好处是:简单、易读、易于处理,但是随着静态配置规模的增加,当配置规模较大时,JSON/YAML 等文件维护和阅读配置很困难,因为重要的配置信息被**淹没在了大量不相关的重复细节**中。 -- 对于使用 KCL 语言进行配置抽象的好处是:对于静态数据,抽象一层的好处这意味着整体系统具有**部署的灵活性**,不同的配置环境、配置租户、运行时可能会对静态数据具有不同的要求,甚至不同的组织可能有不同的规范和产品要求,可以使用 KCL 将最需要、最常修改的配置暴露给用户,对差异化的配置进行抽象,抽象的好处是可以支持不同的配置需求。并且借助 KCL 语言级别的自动化集成能力,还可以很好地支持不同的语言,不同的配置 UI 等。 - -### 3.2 vs. Kustomize - -Kustomize 的核心能力是其 Overlay 能力,并 Kustomize 支持文件级的覆盖,但是存在会存在多个覆盖链条的问题,因为找到具体字段值的声明并不能保证这是最终值,因为其他地方出现的另一个具体值可以覆盖它,对于复杂的场景,Kustomize 文件的继承链检索往往不如 KCL 代码继承链检索方便,需要仔细考虑指定的配置文件覆盖顺序。此外,Kustomize 不能解决 YAML 配置编写、配置约束校验和模型抽象与开发等问题,较为适用于简单的配置场景,当配置组件增多时,对于配置的修改仍然会陷入大量重复不相关的配置细节中,并且在 IDE 中不能很好地显示配置之间的依赖和覆盖关系情况,只能通过搜索/替换等批量修改配置。 - -在 KCL 中,配置合并的操作可以细粒度到代码中每一个配置字段,并且可以灵活的设置合并策略,并不局限于资源整体,并且通过 KCL 的 import 可以静态分析出配置之间的依赖关系。 - -### 3.3 vs. HCL - -#### 3.3.1 功能对比 - -| | HCL | KCL | -| --- | --- | --- | -| 建模能力 | 通过 Terraform Go Provider Schema 定义,在用户界面不直接感知,此外编写复杂的 object 和必选/可选字段定义时用户界面较为繁琐 | 通过 KCL Schema 进行建模,通过语言级别的工程和部分面向对象特性,可以实现较高的模型抽象 | -| 约束能力 | 通过 Variable 的 condition 字段对动态参数进行约束,Resource 本身的约束需要通过 Go Provider Schema 定义或者结合 Sentinel/Rego 等策略语言完成,语言本身的完整能力不能自闭环,且实现方式不统一 | 以 Schema 为核心,在进行建模的同时定义其约束,在 KCL 内部自闭环并一统一方式实现,支持多种约束函数编写,支持可选/必选字段定义 | -| 扩展性 | Terraform HCL 通过分文件进行 Override, 模式比较固定,能力受限。| KCL 可以自定义配置分块编写方式和多种合并策略,可以满足复杂的多租户、多环境配置场景需求 | -| 语言化编写能力 | 编写复杂的对象定义和必选/可选字段定义时用户界面较为繁琐 | 复杂的结构定义、约束场景编写简单,不借助其他外围 GPL 或工具,语言编写自闭环 | - -#### 3.3.2 举例 - -**Terraform HCL Variable 约束校验编写 vs. 
KCL Schema 声明式约束校验编写** - -- HCL - -```python -variable "subnet_delegations" { - type = list(object({ - name = string - service_delegation = object({ - name = string - actions = list(string) - }) - })) - default = null - validation { - condition = var.subnet_delegations == null ? true : alltrue([for d in var.subnet_delegations : (d != null)]) - } - validation { - condition = var.subnet_delegations == null ? true : alltrue([for n in var.subnet_delegations.*.name : (n != null)]) - } - validation { - condition = var.subnet_delegations == null ? true : alltrue([for d in var.subnet_delegations.*.service_delegation : (d != null)]) - } - validation { - condition = var.subnet_delegations == null ? true : alltrue([for n in var.subnet_delegations.*.service_delegation.name : (n != null)]) - } -} -``` - -- KCL - -```python -schema SubnetDelegation: - name: str - service_delegation: ServiceDelegation - -schema ServiceDelegation: - name: str - actions?: [str] # 使用 ? 标记可选属性 - -subnet_delegations: [SubnetDelegation] = option("subnet_delegations") -``` - -此外,KCL 还可以像高级语言一样写类型,写继承,写内置的约束,这些功能是 HCL 所不具备的 - -- KCL 定义复杂的类型继承和约束 - -```python -schema Person: - firstName: str - lastName: str - -schema Employee(Person): - jobTitle: str - -employee = Employee { - firstName = "Alice" - lastName = "White" - jobTitle = "engineer" -} -``` - -**Terraform HCL 函数 vs. KCL Lambda 函数编写 + Plugin 函数编写** - -- 正如 [https://www.terraform.io/language/functions](https://www.terraform.io/language/functions) 文档和 [https://github.com/hashicorp/terraform/issues/27696](https://github.com/hashicorp/terraform/issues/27696) 中展示的那样,Terraform HCL 提供了丰富的内置函数用于提供,但是并不支持用户在 Terraform 中使用 HCL 自定义函数 (或者需要编写复杂的 Go Provider 来模拟一个用户的本地自定义函数);而 KCL 不仅支持用户使用 lambda 关键字直接在 KCL 代码中自定义函数,还支持使用 Python, Go 等语言为 KCL [编写插件函数](https://kusionstack.io/docs/reference/lang/plugin/overview) - -- KCL 自定义定义函数并调用 - -```python -add_func = lambda x: int, y: int -> int { - x + y -} -two = add_func(1, 1) # 2 -``` - -- KCL 使用 Python 编写的插件函数 - - Python 代码 (hello/plugin.py) - -```python -# Copyright 2020 The KCL Authors. All rights reserved. 
- -INFO = { - 'name': 'hello', - 'describe': 'hello doc', - 'long_describe': 'long describe', - 'version': '0.0.1', -} - -def add(a: int, b: int) -> int: - """add two numbers, and return result""" - return a + b -``` - - - KCL 调用插件代码 - -```python -import kcl_plugin.hello - -two = hello.add(1, 1) -``` - -**HCL 删除 null 值与 KCL 使用 -n 编译参数删除 null 值** - -- HCL - -```python -variable "conf" { - type = object({ - description = string - name = string - namespace = string - params = list(object({ - default = optional(string) - description = string - name = string - type = string - })) - resources = optional(object({ - inputs = optional(list(object({ - name = string - type = string - }))) - outputs = optional(list(object({ - name = string - type = string - }))) - })) - results = optional(list(object({ - name = string - description = string - }))) - steps = list(object({ - args = optional(list(string)) - command = optional(list(string)) - env = optional(list(object({ - name = string - value = string - }))) - image = string - name = string - resources = optional(object({ - limits = optional(object({ - cpu = string - memory = string - })) - requests = optional(object({ - cpu = string - memory = string - })) - })) - script = optional(string) - workingDir = string - })) - }) -} - -locals { - conf = merge( - defaults(var.conf, {}), - { for k, v in var.conf : k => v if v != null }, - { resources = { for k, v in var.conf.resources : k => v if v != null } }, - { steps = [for step in var.conf.steps : merge( - { resources = {} }, - { for k, v in step : k => v if v != null }, - )] }, - ) -} -``` - -- KCL (编译参数添加 -n 忽略 null 值) - -```python -schema Param: - default?: str - name: str - -schema Resource: - cpu: str - memory: str - -schema Step: - args?: [str] - command?: [str] - env?: {str:str} - image: str - name: str - resources?: {"limits" | "requests": Resource} - script?: str - workingDir: str - -schema K8sManifest: - name: str - namespace: str - params: [Param] - results?: [str] - steps: [Step] - -conf: K8sManifest = option("conf") -``` - -综上可以看出,在 KCL 中,通过 Schema 来声明方式定义其类型和约束,可以看出相比于 Terraform HCL, 在实现相同功能的情况下,KCL 的约束可以编写的更加简单 (不需要像 Terraform 那样重复地书写 validation 和 condition 字段),并且额外提供了字段设置为可选的能力 (`?`运算符,不像 Terraform 配置字段默认可空,KCL Schema 字段默认必选),结构更加分明,并且可以在代码层面直接获得类型检查和约束校验的能力。 - -### 3.4 vs. CUE - -#### 3.4.1 功能对比 - -| | CUE | KCL | -| --- | --- | --- | -| 建模能力 | 通过 Struct 进行建模,无继承等特性,当模型定义之间无冲突时可以实现较高的抽象。由于 CUE 在运行时进行所有的约束检查,在大规模建模场景可能存在性能瓶颈 | 通过 KCL Schema 进行建模,通过语言级别的工程和部分面向对象特性(如单继承),可以实现较高的模型抽象。 KCL 是静态编译型语言,对于大规模建模场景开销较小 | -| 约束能力 | CUE 将类型和值合并到一个概念中,通过各种语法简化了约束的编写,比如不需要泛型和枚举,求和类型和空值合并都是一回事 | KCL 提供了跟更丰富的 check 声明式约束语法,编写起来更加容易,对于一些配置字段组合约束编写更加简单(能力上比 CUE 多了 if guard 组合约束,all/any/map/filter 等集合约束编写方式,编写更加容易) | -| 分块编写能力 | 支持语言内部配置合并,CUE 的配置合并是完全幂等的,对于满足复杂的多租户、多环境配置场景的覆盖需求可能无法满足 | KCL 可以自定义配置分块编写方式和多种合并策略,KCL 同时支持幂等和非幂等的合并策略,可以满足复杂的多租户、多环境配置场景需求 | -| 语言化编写能力 | 对于复杂的循环、条件约束场景编写复杂,对于需要进行配置精确修改的编写场景较为繁琐 | 复杂的结构定义、循环、条件约束场景编写简单 | - -#### 3.4.2 举例 - -**CUE 约束校验编写 vs. 
KCL Schema 声明式约束校验编写及配置分块编写能力** - -CUE (执行命令 `cue export base.cue prod.cue`) - -- base.cue - -```cue -// base.cue -import "list" - -#App: { - domainType: "Standard" | "Customized" | "Global", - containerPort: >=1 & <=65535, - volumes: [...#Volume], - services: [...#Service], -} - -#Service: { - clusterIP: string, - type: string, - - if type == "ClusterIP" { - clusterIP: "None" - } -} - -#Volume: { - container: string | *"*" // The default value of `container` is "*" - mountPath: string, - _check: false & list.Contains(["/", "/boot", "/home", "dev", "/etc", "/root"], mountPath), -} - -app: #App & { - domainType: "Standard", - containerPort: 80, - volumes: [ - { - mountPath: "/tmp" - } - ], - services: [ - { - clusterIP: "None", - type: "ClusterIP" - } - ] -} - -``` - -- prod.cue - -```python -// prod.cue -app: #App & { - containerPort: 8080, // error: app.containerPort: conflicting values 8080 and 80: -} -``` - -KCL (执行命令 `kcl base.k prod.k`) - -- base.k - -```python -# base.k -schema App: - domainType: "Standard" | "Customized" | "Global" - containerPort: int - volumes: [Volume] - services: [Service] - - check: - 1 <= containerPort <= 65535 - -schema Service: - clusterIP: str - $type: str - - check: - clusterIP == "None" if $type == "ClusterIP" - -schema Volume: - container: str = "*" # The default value of `container` is "*" - mountPath: str - - check: - mountPath not in ["/", "/boot", "/home", "dev", "/etc", "/root"] - -app: App { - domainType = "Standard" - containerPort = 80 - volumes = [ - { - mountPath = "/tmp" - } - ] - services = [ - { - clusterIP = "None" - $type = "ClusterIP" - } - ] -} - -``` - -- prod.k - -```python -# prod.k -app: App { - # 可以使用 = 属性运算符对 base app 的 containerPort 进行修改 - containerPort = 8080 - # 可以使用 += 属性运算符对 base app 的 volumes 进行添加 - # 此处表示在 prod 环境增加一个 volume, 一共两个 volume - volumes += [ - { - mountPath = "/tmp2" - } - ] -} -``` - -此外由于 CUE 的幂等合并特性,在场景上并无法使用类似 kustomize 的 overlay 配置覆盖添加能力,比如上述的 base.cue 和 prod.cue 一起编译会报错。 - -### 3.5 Performance - -在代码规模较大或者计算量较高的场景情况下 KCL 比 CUE/Jsonnet/HCL 等语言性能更好 (CUE 等语言受限于运行时约束检查开销,而 KCL 是一个静态编译型语言) - -- CUE (test.cue) - -```cue -import "list" - -temp: { - for i, _ in list.Range(0, 10000, 1) { - "a\(i)": list.Max([1, 2]) - } -} -``` - -- KCL (test.k) - -```python -a = lambda x: int, y: int -> int { - max([x, y]) -} -temp = {"a${i}": a(1, 2) for i in range(10000)} -``` - -- Jsonnet (test.jsonnet) - -```jsonnet -local a(x, y) = std.max(x, y); -{ - temp: [a(1, 2) for i in std.range(0, 10000)], -} -``` - -- Terraform HCL (test.tf, 由于 terraform range 函数只支持最多 1024 个迭代器,将 range(10000) 拆分为 10 个子 range) - -```python -output "r1" { - value = {for s in range(0, 1000) : format("a%d", s) => max(1, 2)} -} -output "r2" { - value = {for s in range(1000, 2000) : format("a%d", s) => max(1, 2)} -} -output "r3" { - value = {for s in range(1000, 2000) : format("a%d", s) => max(1, 2)} -} -output "r4" { - value = {for s in range(2000, 3000) : format("a%d", s) => max(1, 2)} -} -output "r5" { - value = {for s in range(3000, 4000) : format("a%d", s) => max(1, 2)} -} -output "r6" { - value = {for s in range(5000, 6000) : format("a%d", s) => max(1, 2)} -} -output "r7" { - value = {for s in range(6000, 7000) : format("a%d", s) => max(1, 2)} -} -output "r8" { - value = {for s in range(7000, 8000) : format("a%d", s) => max(1, 2)} -} -output "r9" { - value = {for s in range(8000, 9000) : format("a%d", s) => max(1, 2)} -} -output "r10" { - value = {for s in range(9000, 10000) : format("a%d", s) => max(1, 2)} -} -``` - -- 运行时间(考虑到生产环境的实际资源开销,本次测试以单核为准) - 
-| 环境 | KCL v0.4.3 运行时间 (包含编译+运行时间) | CUE v0.4.3 运行时间 (包含编译+运行时间) | Jsonnet v0.18.0 运行时间 (包含编译+运行时间) | HCL in Terraform v1.3.0 运行时间 (包含编译+运行时间) | -| --- | --- | --- | --- | --- | -| OS: macOS 10.15.7; CPU: Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz; Memory: 32 GB 2400 MHz DDR4; 不开启 NUMA | 440 ms (kclvm_cli run test.k) | 6290 ms (cue export test.cue) | 1890 ms (jsonnet test.jsonnet) | 1774 ms (terraform plan -parallelism=1)| - -综上可以看出:CUE 和 KCL 均可以覆盖到绝大多数配置校验场景,并且均支持属性类型定义、配置默认值、约束校验等编写,但是 CUE 对于不同的约束条件场景无统一的写法,且不能很好地透出校验错误,KCL 使用 check 关键字作统一处理,支持用户自定义错误输出。 - -## 四、KCL 核心实现原理 - -### 4.1 KCL 技术架构 - -![](/img/blog/2022-09-15-declarative-config-overview/16-kcl-compiler-arch.png) -图 16 KCL 编译器架构 - -如图 16 所示,KCLVM 编译器实现以 Spec 作为驱动(主要包含 KCL 语言规范,KCL 多语言集成规范、KCL OpenAPI 规范),KCL 是一个编译型的语言,整体保持与常规语言编译器相同的三段式架构,并在其中借助了 LLVM-IR 作为 KCL 连接 Native/WASM 代码的中间纽带,主要有以下 3 个核心工作: - -- KCL AST 到 LLVM-IR 的翻译:通过遍历 KCL AST,根据 KCL 语言规范生成相应的 LLVM-IR 代码,相当于用户态代码。 -- KCL Runtime Lib:运行时辅助函数库,提供运行时 KCL 的值/类型计算、内存、上下文管理、内置库和插件库支持,相当于系统态代码。 -- 用户态与系统态代码链接与执行:将用户态代码与系统态代码链接为一个动态链接库/WASM Module,最终通过统一的 Runner 模块执行编译后的 KCL 代码。 - -此外 KCL 在语义检查器和和 Plugin 这块做了增强支持: - -- Resolver - - 静态类型推导与检查:可以在编译时进行类型推导和检查,避免运行时类型检查开销,可以作为 IDE 插件跳转、补全等功能支持和语义 API 构建(如 schema 模型查询、依赖分析等) 的良好基础 - - 配置图合并:在编译过程对配置数据依赖图进行构建与合并,最终运行时仅进行少量计算即可得到最终的解 - - 语义依赖图:通过内建语义依赖图,KCL 可以完成配置变更的依赖分析, 并且根据配置变更结果进行增量编译,对不变的配置进行缓存,可以提升端到端编译性能 - - Schema 为中心的面向对象特性:KCL 语言只保留了单继承的语法。同时 schema 可以通过 mixin 和 protocol 等特性混入复用相同的代码片段,对于不同的能力配套,可以通过 mixin 机制编写,并通过 mixin 声明的方式“混入”到不同的结构体中 -- Plugin: 可以使用 Python/Go 编写扩展库,主要包含一些领域能力,如访问网络或数据库等。 - -### 4.2 KCL 配置图模型 - -![](/img/blog/2022-09-15-declarative-config-overview/17-kcl-graph-unification.png) -图 17 KCL 配置图模型 - -图 17 示出了 KCL 内部的配置图模型,首先 KCL 代码在编译过程中形成两张图(用户不同配置直接的引用和从属关系一般形式一张有向无环图),分别对应结构体内部声明代码及结构体使用声明代码。编译过程可以简单分为三步。 - -- 首先定义平台侧的结构体并形成结构体内部声明代码图 -- 其次声明并合并不同用户侧配置代码图 -- 最后将用户侧配置代码图计算的结果代入平台侧结构体内部声明代码图求解,最终得到完整配置图定义 - -通过这样简单的计算过程,可以在编译时完成大部分代换运算,最终运行时仅进行少量计算即可得到最终的解。同时在编译合并图过程中仍然能够执行类型检查和值的检查,区别是类型检查是做泛化、取偏序上确界(检查某个变量的值是否满足既定类型或者既定类型的子类型),值检查是做特化、取偏序下确界(比如将两个字典合并为一个字典)。 - -## 五、小结 - -文本对声明式配置技术做了整体概述,其中重点阐述了 KCL 概念、核心设计、使用场景以及与其他配置语言的对比,期望帮助大家更好的理解声明式配置技术及 KCL 语言。更多 KusionStack 的概念、背景、设计与用户案例相关的内容,欢迎访问 https://kusionstack.io/ - -## 六、参考 - -- KusionStack Cloud Native Configuration Practice Blog: [https://kusionstack.io/blog/2021-kusion-intro](https://kusionstack.io/blog/2021-kusion-intro) -- Terraform Language: [https://www.terraform.io/language](https://www.terraform.io/language) -- Terraform Provider Kubernetes: [https://github.com/hashicorp/terraform-provider-kubernetes](https://github.com/hashicorp/terraform-provider-kubernetes) -- Terraform Provider AWS: [https://github.com/hashicorp/terraform-provider-aws](https://github.com/hashicorp/terraform-provider-aws) -- Pulumi: [https://www.pulumi.com/docs/](https://www.pulumi.com/docs/) -- Pulumi vs. 
Terraform: [https://www.pulumi.com/docs/intro/vs/terraform/](https://www.pulumi.com/docs/intro/vs/terraform/) -- Google SRE Work Book Configuration Design: [https://sre.google/workbook/configuration-design/](https://sre.google/workbook/configuration-design/) -- Google Borg Paper: [https://storage.googleapis.com/pub-tools-public-publication-data/pdf/43438.pdf](https://storage.googleapis.com/pub-tools-public-publication-data/pdf/43438.pdf) -- Holistic Configuration Management at Facebook: [https://sigops.org/s/conferences/sosp/2015/current/2015-Monterey/printable/008-tang.pdf](https://sigops.org/s/conferences/sosp/2015/current/2015-Monterey/printable/008-tang.pdf) -- JSON Spec: [https://www.json.org/json-en.html](https://www.json.org/json-en.html) -- YAML Spec: [https://yaml.org/spec/](https://yaml.org/spec/) -- GCL: [https://github.com/rix0rrr/gcl](https://github.com/rix0rrr/gcl) -- HCL: [https://github.com/hashicorp/hcl](https://github.com/hashicorp/hcl) -- CUE: [https://github.com/cue-lang/cue](https://github.com/cue-lang/cue) -- Jsonnet: [https://github.com/google/jsonnet](https://github.com/google/jsonnet) -- Dhall: [https://github.com/dhall-lang/dhall-lang](https://github.com/dhall-lang/dhall-lang) -- Thrift: [https://github.com/Thriftpy/thriftpy2](https://github.com/Thriftpy/thriftpy2) -- Kustomize: [https://kustomize.io/](https://kustomize.io/) -- Kube-linter: [https://github.com/stackrox/kube-linter](https://github.com/stackrox/kube-linter) -- Checkov: [https://github.com/bridgecrewio/checkov](https://github.com/bridgecrewio/checkov) -- KCL Documents: [https://kusionstack.io/docs/reference/lang/lang/tour](https://kusionstack.io/docs/reference/lang/lang/tour) -- How Terraform Works: A Visual Intro: [https://betterprogramming.pub/how-terraform-works-a-visual-intro-6328cddbe067](https://betterprogramming.pub/how-terraform-works-a-visual-intro-6328cddbe067) -- How Terraform Works: Modules Illustrated: [https://awstip.com/terraform-modules-illustrate-26cbc48be83a](https://awstip.com/terraform-modules-illustrate-26cbc48be83a) -- Helm: [https://helm.sh/](https://helm.sh/) -- Helm vs. Kustomize: [https://harness.io/blog/helm-vs-kustomize](https://harness.io/blog/helm-vs-kustomize) -- KubeVela: [https://kubevela.io/docs/](https://kubevela.io/docs/) diff --git a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-09-16-learn-from-scale-practice/index.md b/i18n/zh-CN/docusaurus-plugin-content-blog/2022-09-16-learn-from-scale-practice/index.md deleted file mode 100644 index 79745234..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-blog/2022-09-16-learn-from-scale-practice/index.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -slug: 2022-learn-from-scale-practice -title: 从规模化平台工程实践,我们学到了什么 -authors: - name: 朵晓东 - title: Kusion Creator -tags: [KusionStack, Kusion] ---- - -**摘要**:本文尝试从平台工程、专用语言、分治、建模、自动化和协同文化等几个角度阐述规模化平台工程实践中的挑战和最佳实践。希望通过把我们平台工程的理念和实践分享给更多企业和团队,一起让一些有意思的变化发生。 - -本文基于 [KusionStack](https://kusionstack.io/docs/user_docs/intro/kusion-intro) 技术栈在蚂蚁平台工程及自动化中的实践总结而成。 - - -## 1. 
平台工程:让企业级 DevOps 发生 - -DevOps 理念在 10 多年前被提出,从 KVM 到容器再到云原生时代,大量企业投入 DevOps 运动以期望解决内部规模化运维效率和平台建设效率的困境。其中大部分陷入过某种基于对 DevOps 朴素认知的 Anti-Pattern,同时也有部分公司探索出自己的路径。我经历过如下图简示的 Anti-Patterns,Dev 与 Ops 团队各行其是,或者简单的强制 Dev 团队独立完成 Ops 工作。在[这里](https://web.devopstopologies.com/#anti-types)可以找到更多更典型分类。 - -![](/img/blog/2022-09-16-learn-from-scale-practice/devops-anti-pattern.png) - -企业内规模化 DevOps 难以推行的原因多种多样,特别是在企业内自持基础设施、同时采用云上技术平台的公司阻力最大。其中以这几种情况尤为常见: - -- 研发团队和运维团队由于部门墙、领导者缺少洞察等等原因各自为政,难以达成一致意见 -- 研发团队低估了基础设施技术、运维、稳定性工作的专业性、复杂性和快速变化,以朴素的 DevOps 理解强制应用研发者成为专家 -- 领导者建立了专职的 DevOps 团队,但沦为中间的执行者,没能让 Dev 和 Ops 团队各自向前一步,紧密协同 -- 平台研发团队对规模化带来的业务复杂性以及技术演进带来的技术复杂性应对不足,无法对应用研发者提供有效的技术支撑 -- ... - -不同于面向云上托管基础设施服务和 DevOps-as-a-Service 产品工作的小型团队,中大型企业往往需要根据自身团队架构和文化建立适当的 DevOps 体系。从成功案例看,无论是 Meta 公司由 Dev 完全承担 Ops 职能,还是 Google 公司引入 SRE 团队作为中间层,平台工程([Platform Engineering](https://platformengineering.org/blog/what-is-platform-engineering))都扮演了非常重要的角色。平台工程旨在帮助企业构建面向应用研发者的自服务运维体系,尝试通过工程化的技术手段和工作流程解决以下关键问题: - -- 设计合理的抽象层次,帮助应用研发者降低对 Infra、platform 等技术以及运维、稳定性工作的认知负担 -- 为应用研发者提供统一的工作界面和空间,避免用户陷入割裂的平台产品界面、复杂的工作流中 -- 帮助研发者通过有效的工作流程和推荐路径基于 [内部工程平台](https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/) 快速开展工作 -- 帮助研发者通过配套的 CI、CD、CDRA 等产品自服务管理应用生命周期 -- 帮助平台产品研发团队简单、高效、一致的开放其平台基础能力 -- 通过培训、布道、运营等手段营造协同工作和分享的文化 - -事实上,不是所有人都应该或者能够成为这个领域的专家,这非常困难!实际上平台技术团队的专家通常也仅擅长自己的专业领域而已,特别是在云原生理念和技术广泛应用的今天,面向大量高度开放、可配置的平台技术带来的成百上千的应用配置,PaaS 领域的业务复杂性,以及高稳定性和统一治理的要求,而平台工程的目的正是为了让应用研发者尽可能简单无痛的参与到这样规模化的 DevOps 工作中。在蚂蚁的实践中,我们更趋向于以下这种合作状态,在团队架构和工作模式上更靠近 Google 的最佳实践。平台研发者及 SRE 成为 “Enabler” 支持应用研发者自服务的完成研发及交付运维,同时应用研发者使其应用可交付运维的工作结果也成为运维人员可以接手应用运维工作的基础。最终 SRE、应用研发及运维人员把工作过程中的问题和痛点反馈给平台研发者形成正向循环。 - -![](/img/blog/2022-09-16-learn-from-scale-practice/devops-cycle.png) - - -## 2. 专用语言:工程化方式的一极 - -有什么比一种专用语言更适合开放的、自服务的、面向领域业务的问题定义,同时需要满足自动化、低安全风险、低噪音、易治理的企业内部要求吗?正如记录音乐有五线谱,存储时间序列数据有时序数据库一样,在平台工程的特定问题域内,一批配置和策略语言用于编写和管理规模化复杂配置及策略。不同于混合编写范式、混合工程能力的高级通用语言,这类专用语言的核心逻辑是以收敛的有限的语法、语义集合解决领域问题近乎无限的变化和复杂性,将规模化复杂配置和策略编写思路和方式沉淀到语言特性中。 - -在蚂蚁的平台工程实践中,我们强化了客户端的工作方式,将围绕应用运维生命周期的模型、编排、约束和策略稳定、可扩展的通过专用语言 [KCL](https://github.com/KusionStack/KCLVM) 编写维护在共享仓库 [Konfig](https://github.com/KusionStack/konfig) 中。 KCL 是一种面向有编程能力的应用研发者的静态强类型语言,提供现代高级语言的编写体验和围绕领域目的有限功能。在平台工程实践中 KCL 不是一种仅用于编写 K-V 对的语言,而是一种面向平台工程领域的专用语言。应用研发者、SRE、平台研发者面向 Konfig 协同研发,通过 KCL 原生功能编写应用配置,以及在 PaaS 领域更为高频和复杂的[模型](https://kusionstack.io/docs/reference/lang/lang/tour/#schema)抽象、[功能函数](https://kusionstack.io/docs/reference/lang/lang/tour/#function)和[约束](https://kusionstack.io/docs/reference/lang/lang/tour/#validation)[规则](https://kusionstack.io/docs/reference/lang/lang/tour/#rule),即编写稳定、可扩展的业务模型、业务逻辑、防错约束和环境规则。Konfig 仓库则成为统一的编程界面,工作空间和业务层载体,而基于 KCL 的安全、低噪音、低副作用、一致的编写范式更有利于长期管理和治理。 - -![](/img/blog/2022-09-16-learn-from-scale-practice/kcl-dev.png) - - -## 3. 
分治:解构规模化问题 - -分治思路是解决规模化问题的钥匙,从 MapReduce 到 Kubernetes 无不体现其功效。在规模化交付运维领域,经典运维平台试图在统一的黑盒平台产品中,以内置的统一模型、编排、provision 技术来应对全量业务场景。这样的实践可以快速启动,在小范围内奏效,但随着不同业务主体采用率提升引入差异化需求,同时随着持续变化的平台技术逐渐进入疲态。 - -![](/img/blog/2022-09-16-learn-from-scale-practice/classic-plats.png) - -在蚂蚁的实践中,Konfig monorepo 是内部工程平台向研发者开放的编程界面和工作空间,帮助应用研发者以统一的编程界面编写围绕应用运维生命周期的配置和策略,从而编排和使用存量和新增的平台基础设施,按需创建管理云原生环境以及基于 RBAC 的权限,并通过 GitOps 方式管理交付过程。Konfig monorepo 为不同场景、项目、应用提供了独立的白盒的编程空间,其内生的扩展性来源于: - -- 灵活、可扩展、独立的客户端的 [工程结构设计](https://kusionstack.io/docs/user_docs/concepts/konfig) -- 独立配置块 [自动合并技术](https://kusionstack.io/docs/reference/lang/lang/tour/#-operators-1)支持任意分块、可扩展的配置块组织 -- [静态类型系统](https://kusionstack.io/docs/reference/lang/lang/tour/#type-system)技术提供现代编程语言可复用、可扩展的类型化建模和约束功能 -- 项目粒度的 GitOps CI 工作流程定义支持 -- 基于 [Kusion](https://github.com/KusionStack/kusion) 引擎的 provision 技术选择 - -Konfig monorepo 提供了分治的、可组合的工程结构设计、代码组织、建模方式、工作流程定义和 provision 技术选择支持,同时又以一致的研发模式和工作流承载了可扩展的业务需求。这样客户端的工作方式在保证灵活性、可扩展性、可移植性的同时也降低了对服务端扩展机制,如 Kubernetes API Machinery,持续增长的压力。 - -下图示意了一种 Konfig monorepo 中 GitOps 方式的典型的自动化工作流程,从面向应用的代码变更开始,通过可配置的 CI、CD 过程触达运行时,这样的机制相比中心化的黑盒产品方式更加开放、可定制、可扩展,也免去了针对不同业务场景、不同项目、应用设计笨拙的配置文件管理 portal 的必要。 - -![](/img/blog/2022-09-16-learn-from-scale-practice/d-c-overview.png) - - -## 4. 建模:边际收益和长尾问题 - -有了分治的白盒化的工程结构设计、代码组织方式、建模方式、工作流程定义和 provision 技术选择,以怎么的策略面向平台 API 工作是另一个需要考虑的问题。在企业内典型的争议在于直面平台细节还是设计一种抽象,或者上升到显式(explicit)和隐式(implict)的理念的争议。 - -抽象的隐式的方式是运维平台工程师们面向非专家型终端用户的普遍选择,他们希望能设计出易于理解、便于使用的应用模型或 Spec 抽象,与具体的平台技术细节隔离,降低用户认知负担,并通过降低细节感知防错。但大部分运维平台的研发者倾向于设计一种强大的、统一的应用模型或 Spec 抽象,在实践中往往会遇到这些阻碍: - -- 随着企业内不同业务主体采用率的提升,统一建模难以落地。在蚂蚁内部最典型的案例是 Infra 基础技术类组件和 SaaS 应用间的巨大差异性,SaaS 应用便于统一,Infra 应用往往需要单独设计。 -- 面向企业内大量的平台技术,统一模型自身难以稳定,特别是应对持续变化的业务需求和平台技术驱动的需求增长。在蚂蚁的实践中,交付运维受多种因素影响有较强的不稳定性,同时围绕应用的 deliverable、runtime、security、instrumentation 的业务需求也在增长。以 instrumentation 为例,近两年对应用运行时可观察性、SLO 定义的需求快速增长直接驱动了终端用户使用的变化。 -- 抽象模型的共性问题是需要面向用户设计出合理的模型,面向平台 API 细节保持同步。 - -在蚂蚁的实践中,面向终端用户即应用研发者我们采用了抽象模型的方式,通过如下思路解决几个关键问题: - -- 面向典型应用场景(如蚂蚁的 Sofa 应用)建模,这些模型由平台研发者、平台 SRE 主导开发,与应用研发者共同维护,达到用户体验、成本和标准兼容的平衡,在蚂蚁的实践中抽象模型的信息熵收敛比约为 1:5,通过广泛的高频使用保证建模投入的边际收益。 -- 对于非典型用户场景或应用,由平台研发者、平台 SRE 支持应用研发者完成针对应用的模型设计。KCL [schema](https://kusionstack.io/docs/reference/lang/lang/tour/#schema) 和 [mixin](https://kusionstack.io/docs/reference/lang/lang/tour#protocol--mixin) 等机制帮助用户建模、抽象、继承、组合、复用,减少重复代码,事实上这样的建模设计工作也是应用 PaaS 领域的重点之一,但对于这样的场景我们需要更合理的分工。最终大量 “非标” 平台技术在蚂蚁内部首次以一致的方式被纳管,有效解决了长尾问题。在典型协同模式下,平台研发者、平台 SRE 编写平台能力基础组件成为 “Enabler”,帮助应用研发者使用平台能力基础组件快速“搭积木”,完成其应用模型的研发工作。 -- 面向平台技术,我们提供了平台 API Spec 到 KCL 类型代码的[生成技术](https://kusionstack.io/docs/reference/cli/openapi/),并通过[组合编译技术](https://kusionstack.io/docs/reference/lang/lang/tour/#multi-file-compilation)原生支持对不同 Kubernetes API 版本的编译时选择,在内部实践中解决了应用抽象模型面向不同版本 Kubernetes 集群工作的灵活需求。同时,KCL 支持 [in-schema 约束](https://kusionstack.io/docs/reference/lang/lang/tour/#validation)和独立环境[规则](https://kusionstack.io/docs/reference/lang/lang/tour/#rule)的编写。此外,KCL 还提供了 [deprecated 装饰器](https://kusionstack.io/docs/reference/lang/lang/tour/#decorators)支持对已下线模型或模型属性的标注。通过在客户端健壮的、完备的模型和约束机制,在编译时暴露如配置错误、类型漂移等常见问题。相对于运行时左移的发现问题,避免推进到集群时发生运行时错误或故障,这也是企业内,特别是高风险等级企业,对生产环境稳定性的必须要求。 - -对于基础平台技术的专家型用户,他们通常非常熟悉特定的技术领域,更希望以直面平台细节的显式的方式工作,语言提供必要的动态性和模块化支持,通过类型和约束机制保证稳定性。但这种显式的方式无法解决专家用户不熟悉跨领域平台技术使用细节的问题,也不能解决面向平台技术的扩展性和复杂性叠加的问题。在蚂蚁内部小范围基于 YAML 的显式的工程实践中,面向大量高度开放、可配置的平台技术,复杂性随着平台技术使用率持续叠加,最终陷入难以阅读、编写、约束、测试及维护的僵化状态。 - -## 5. 
自动化:新的挑战 - -运维自动化是基础设施运维领域的经典技术范畴,随着云原生理念及技术的推波助澜,可以被自动化集成成为企业运维实践的基本要求,开源开放、高度可配置的 CI、CD 技术逐步被企业采纳,黑盒的、无法被集成的 “产品” 方式逐步被灵活的可编排方式弱化并替代。这种实践的主要优势在于其强大的自定义编排和链接能力,高度的可扩展性和良好的可移植性。特别是在 Kubernetes 生态,GitOps 方式有更高的采用率,与可配置的 CI、CD 技术有天然的亲和性。这样的变化也在推进以工单和运维产品为中心的工作流逐步转变为以工程效率平台为中心的自服务工作流,而生产环境的运维能力则成为了工作流中面向生产自动运维的一个重要环节。在开源社区,面向不同研发效率平台的抽象层技术创新也在活跃进行中,平台侧研发者希望通过最短的认知和实践路径打通应用到云环境的 CI、CD 过程。 - -在蚂蚁的工程实践中,工程效率平台深度参与了 Konfig monorepo 的开放自动化实践,我们的实践方向也与工程效率平台技术演进方向高度一致。在从几人到几十人再到几百人的协同工作中,面向运维场景的工作流设计,高频的代码提交和 pipelines 执行,实时自动化测试和部署过程,这些对服务于单库的工程效率平台造成了很多的挑战。特别是 monorepo 中多样化的业务需要独立且强大的工作流自定义和操作支持,也需要高实时性、强 SLO 保障的并行的工作流执行能力,这些需求与单库模式的需求有巨大的差异,也给我们制造了很多麻烦。大部分配置语言是解释型语言,而 KCL 被设计为一种编译型语言,由 Rust、C、LLVM 优化器实现,以达到对规模化 KCL 文件提供[高性能](https://kusionstack.io/blog/2022-declarative-config-overview#35-performance)编译和运行时执行的目标,同时支持编译到本地码和 wasm 以满足不同运行时的执行要求。另外 Git 的存储及架构设计不同于 [Citc/Piper](https://cacm.acm.org/magazines/2016/7/204032-why-google-stores-billions-of-lines-of-code-in-a-single-repository/fulltext) 架构,不适用于规模化代码的 monorepo,所幸今天我们的代码量还没有遇到很大的问题。我们正在一起工作解决这些问题,希望随着实践的深入逐步解决他们。 - -## 6. 协同和文化:更重要的事 - -以上的技术、工具、机制都非常重要,但我必须要说,对于工程化、Devops 更重要的是团体与团队的协同、合作和分享的文化,因为这是一种由人组成的工作,人和文化是其中的关键。在企业内,如果部门墙、团队壁垒丛生,流行封闭糟糕的工程文化,我们通常会看到大量私有的代码库和私有文档,小群体的判断和工作方式,本该紧密合作的团队以各自目标为导向各行其是,在这样的文化下我认为一切规模化工作都会非常困难。所以如果你所在的公司或团队想采纳规模化 Devops,我认为最重要的是做好广泛的沟通并开始文化的建设,因为这绝对不只是几个人的事,并且这很有难度且不可控。 - -在蚂蚁的实践中,初期总有各种各样的困难,大家对自服务机制和协同文化的担心尤为突出,例如 “我居然要写代码?” “我的代码居然跟其他团队在一个仓库里?” ,“我负责的工作可不简单,这种方式行不通” 都是很典型的担忧。所幸我们最终建立了一个面向共同目标的虚拟组织,合作方和领导者给予了充分的支持,我们在理念和工作方式上达成一致并协同工作。在实践过程中,大多数工程师并不是障碍,当然他们会吐槽技术、流程和机制还不够完善,希望获得更好的体验,这无可厚非。真正的障碍首先来自于运维平台研发团队自身,我看到一些公司的 Devops 理想最终回归到运维平台团队替代应用研发者做掉所有工作,甚至不让用户接触到代码和工具链这些生产工具,急于隐藏于已有的 GUI 产品界面,我认为这跑偏了,也低估了用户自身的能力和创造力。另外障碍也来自于部分平台技术团队的技术负责人,他们很难放下持续多年的已有工作,难以接受转向到新的用户服务模式。可行的办法是让他们明白这项工作的意义和远景,逐步的分阶段的影响他们。 - -## 7. 小结 - -经过一年多的实践,有 400+ 研发者直接研发参与了 Konfig monorepo 的代码贡献,管理了超过 1500 个 projects,其中平台研发者及平台 SRE 与应用研发者比例不到 1:9,这些应用研发者有些是应用 owner 本人,有些是应用研发团队的代表,这由应用团队自己决定。通过持续的自动化能力搭建,基于 Konfig monorepo 每天发生 200-300 次 commits,其中大部分是自动化的代码修改,以及大约 1k pipeline 任务执行和近 10k KCL 编译执行。在今天如果将 Konfig 中全量代码编译一次并输出会产生 300W+ 行 YAML 文本,事实上一次发布运维过程中需要多次不同参数组合的编译过程。通过轻量化,便于移植的代码库和工具链,我们完成了一次意义重大的外部专有云交付,免去了改造、移植输出一系列老旧运维平台的痛苦。在蚂蚁内部我们服务了几种不同的运维场景,正在扩大应用规模并探索更多的可能性。 - -最后我想说一说下一步的计划,我们的技术和工具在易用性和体验上还有很大的提升空间,需要更多的用户反馈和持续的改进,用户体验工作没有快速路径。在测试方面,我们提供了简单的集成测试手段,起到了冒烟测试的作用,但这还不够,我们正在尝试基于约束、规则而非测试的方式保证正确性。在工作界面方面,我们希望构建基于 IDE 的线下工作空间,持续规约、优化内部线上产品体验和工作流程。同时我们希望持续提升覆盖范围和技术能力。另外我们也希望将实践方式更广泛的应用在 CI 构建,自动化运维等场景,缩短终端用户的信息感知和端到端工作流程。目前KusionStack还处于刚刚开源的非常早期的阶段,在未来还有大量的工作要做。最重要的是我们希望把我们平台工程的理念和实践分享给更多企业和团队,一起推动并见证一些有意思的变化发生。 - -## 8. 
引用 - -- [https://kusionstack.io/docs/user_docs/intro/kusion-intro](https://kusionstack.io/docs/user_docs/intro/kusion-intro) -- [https://platformengineering.org/blog/what-is-platform-engineering](https://platformengineering.org/blog/what-is-platform-engineering) -- [https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/](https://internaldeveloperplatform.org/what-is-an-internal-developer-platform/) -- [https://web.devopstopologies.com/#anti-types](https://web.devopstopologies.com/#anti-types) -- [https://github.com/KusionStack/kusion](https://github.com/KusionStack/kusion) -- [https://github.com/KusionStack/KCLVM](https://github.com/KusionStack/KCLVM) -- [https://kusionstack.io/docs/reference/lang/lang/tour](https://kusionstack.io/docs/reference/lang/lang/tour/#%E9%85%8D%E7%BD%AE%E6%93%8D%E4%BD%9C) -- [https://kusionstack.io/docs/user_docs/concepts/konfig](https://kusionstack.io/docs/user_docs/concepts/konfig) -- [https://kusionstack.io/blog/2022-declarative-config-overview#35-performance](https://kusionstack.io/blog/2022-declarative-config-overview#35-performance) -- [https://cacm.acm.org/magazines/2016/7/204032-why-google-stores-billions-of-lines-of-code-in-a-single-repository/fulltext](https://cacm.acm.org/magazines/2016/7/204032-why-google-stores-billions-of-lines-of-code-in-a-single-repository/fulltext) - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current.json deleted file mode 100644 index 961e9b27..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current.json +++ /dev/null @@ -1,310 +0,0 @@ -{ - "version.label": { - "message": "Next", - "description": "The label for version current" - }, - "sidebar.docs.category.kusion_kubernetes": { - "message": "kusion_kubernetes", - "description": "The label for category kusion_kubernetes in sidebar docs" - }, - "sidebar.docs.category.api": { - "message": "api", - "description": "The label for category api in sidebar docs" - }, - "sidebar.docs.category.admissionregistration": { - "message": "admissionregistration", - "description": "The label for category admissionregistration in sidebar docs" - }, - "sidebar.docs.category.v1": { - "message": "v1", - "description": "The label for category v1 in sidebar docs" - }, - "sidebar.docs.category.apps": { - "message": "apps", - "description": "The label for category apps in sidebar docs" - }, - "sidebar.docs.category.autoscaling": { - "message": "autoscaling", - "description": "The label for category autoscaling in sidebar docs" - }, - "sidebar.docs.category.batch": { - "message": "batch", - "description": "The label for category batch in sidebar docs" - }, - "sidebar.docs.category.v1beta1": { - "message": "v1beta1", - "description": "The label for category v1beta1 in sidebar docs" - }, - "sidebar.docs.category.core": { - "message": "core", - "description": "The label for category core in sidebar docs" - }, - "sidebar.docs.category.networking": { - "message": "networking", - "description": "The label for category networking in sidebar docs" - }, - "sidebar.docs.category.rbac": { - "message": "rbac", - "description": "The label for category rbac in sidebar docs" - }, - "sidebar.docs.category.apimachinery": { - "message": "apimachinery", - "description": "The label for category apimachinery in sidebar docs" - }, - "sidebar.docs.category.apis": { - "message": "apis", - "description": "The label for category apis in sidebar docs" - }, - "sidebar.docs.category.kusion_models": { - "message": "kusion_models", - 
"description": "The label for category kusion_models in sidebar docs" - }, - "sidebar.docs.category.kube": { - "message": "kube", - "description": "The label for category kube in sidebar docs" - }, - "sidebar.docs.category.frontend": { - "message": "frontend", - "description": "The label for category frontend in sidebar docs" - }, - "sidebar.docs.category.common": { - "message": "common", - "description": "The label for category common in sidebar docs" - }, - "sidebar.docs.category.configmap": { - "message": "configmap", - "description": "The label for category configmap in sidebar docs" - }, - "sidebar.docs.category.container": { - "message": "container", - "description": "The label for category container in sidebar docs" - }, - "sidebar.docs.category.env": { - "message": "env", - "description": "The label for category env in sidebar docs" - }, - "sidebar.docs.category.lifecycle": { - "message": "lifecycle", - "description": "The label for category lifecycle in sidebar docs" - }, - "sidebar.docs.category.port": { - "message": "port", - "description": "The label for category port in sidebar docs" - }, - "sidebar.docs.category.probe": { - "message": "probe", - "description": "The label for category probe in sidebar docs" - }, - "sidebar.docs.category.ingress": { - "message": "ingress", - "description": "The label for category ingress in sidebar docs" - }, - "sidebar.docs.category.resource": { - "message": "resource", - "description": "The label for category resource in sidebar docs" - }, - "sidebar.docs.category.secret": { - "message": "secret", - "description": "The label for category secret in sidebar docs" - }, - "sidebar.docs.category.service": { - "message": "service", - "description": "The label for category service in sidebar docs" - }, - "sidebar.docs.category.serviceaccount": { - "message": "serviceaccount", - "description": "The label for category serviceaccount in sidebar docs" - }, - "sidebar.docs.category.sidecar": { - "message": "sidecar", - "description": "The label for category sidecar in sidebar docs" - }, - "sidebar.docs.category.strategy": { - "message": "strategy", - "description": "The label for category strategy in sidebar docs" - }, - "sidebar.docs.category.volume": { - "message": "volume", - "description": "The label for category volume in sidebar docs" - }, - "sidebar.docs.category.develop": { - "message": "develop", - "description": "The label for category develop in sidebar docs" - }, - "sidebar.docs.category.governance": { - "message": "governance", - "description": "The label for category governance in sidebar docs" - }, - "sidebar.docs.category.user_docs": { - "message": "user_docs", - "description": "The label for category user_docs in sidebar docs" - }, - "sidebar.docs.category.Kubernetes": { - "message": "Kubernetes", - "description": "The label for category Kubernetes in sidebar docs" - }, - "sidebar.docs.category.ArgoCD": { - "message": "ArgoCD", - "description": "The label for category ArgoCD in sidebar docs" - }, - "sidebar.user_docs.category.Introduction": { - "message": "简介", - "description": "The label for category Introduction in sidebar user_docs" - }, - "sidebar.user_docs.category.Get Started": { - "message": "快速开始", - "description": "The label for category Get Started in sidebar user_docs" - }, - "sidebar.user_docs.category.Download & Install": { - "message": "下载和安装", - "description": "The label for category 下载和安装 in sidebar user_docs" - }, - "sidebar.user_docs.category.Architecture & Concepts": { - "message": "架构 & 概念", - "description": "The 
label for category Architecture & Concepts in sidebar user_docs" - }, - "sidebar.user_docs.category.Adopting KusionStack": { - "message": "开始 Kusion", - "description": "The label for category 开始 Kusion in sidebar user_docs" - }, - "sidebar.user_docs.category.Kubernetes": { - "message": "Kubernetes", - "description": "The label for category Kubernetes in sidebar user_docs" - }, - "sidebar.user_docs.category.Project Best Practices": { - "message": "项目组织最佳实践", - "description": "The label for category Project Best Practices in sidebar user_docs" - }, - "sidebar.user_docs.category.ArgoCD": { - "message": "ArgoCD", - "description": "The label for category ArgoCD in sidebar user_docs" - }, - "sidebar.user_docs.category.Manage Sensitive Information": { - "message": "敏感信息管理", - "description": "The label for category Manage Sensitive Information in sidebar user_docs" - }, - "sidebar.reference.category.kusion_kubernetes": { - "message": "kusion_kubernetes", - "description": "The label for category kusion_kubernetes in sidebar reference" - }, - "sidebar.reference.category.api": { - "message": "api", - "description": "The label for category api in sidebar reference" - }, - "sidebar.reference.category.admissionregistration": { - "message": "admissionregistration", - "description": "The label for category admissionregistration in sidebar reference" - }, - "sidebar.reference.category.v1": { - "message": "v1", - "description": "The label for category v1 in sidebar reference" - }, - "sidebar.reference.category.apps": { - "message": "apps", - "description": "The label for category apps in sidebar reference" - }, - "sidebar.reference.category.autoscaling": { - "message": "autoscaling", - "description": "The label for category autoscaling in sidebar reference" - }, - "sidebar.reference.category.batch": { - "message": "batch", - "description": "The label for category batch in sidebar reference" - }, - "sidebar.reference.category.v1beta1": { - "message": "v1beta1", - "description": "The label for category v1beta1 in sidebar reference" - }, - "sidebar.reference.category.core": { - "message": "core", - "description": "The label for category core in sidebar reference" - }, - "sidebar.reference.category.networking": { - "message": "networking", - "description": "The label for category networking in sidebar reference" - }, - "sidebar.reference.category.rbac": { - "message": "rbac", - "description": "The label for category rbac in sidebar reference" - }, - "sidebar.reference.category.apimachinery": { - "message": "apimachinery", - "description": "The label for category apimachinery in sidebar reference" - }, - "sidebar.reference.category.apis": { - "message": "apis", - "description": "The label for category apis in sidebar reference" - }, - "sidebar.reference.category.kusion_models": { - "message": "kusion_models", - "description": "The label for category kusion_models in sidebar reference" - }, - "sidebar.reference.category.kube": { - "message": "kube", - "description": "The label for category kube in sidebar reference" - }, - "sidebar.reference.category.frontend": { - "message": "frontend", - "description": "The label for category frontend in sidebar reference" - }, - "sidebar.reference.category.common": { - "message": "common", - "description": "The label for category common in sidebar reference" - }, - "sidebar.reference.category.configmap": { - "message": "configmap", - "description": "The label for category configmap in sidebar reference" - }, - "sidebar.reference.category.container": { - "message": "container", 
- "description": "The label for category container in sidebar reference" - }, - "sidebar.reference.category.env": { - "message": "env", - "description": "The label for category env in sidebar reference" - }, - "sidebar.reference.category.lifecycle": { - "message": "lifecycle", - "description": "The label for category lifecycle in sidebar reference" - }, - "sidebar.reference.category.port": { - "message": "port", - "description": "The label for category port in sidebar reference" - }, - "sidebar.reference.category.probe": { - "message": "probe", - "description": "The label for category probe in sidebar reference" - }, - "sidebar.reference.category.ingress": { - "message": "ingress", - "description": "The label for category ingress in sidebar reference" - }, - "sidebar.reference.category.resource": { - "message": "resource", - "description": "The label for category resource in sidebar reference" - }, - "sidebar.reference.category.secret": { - "message": "secret", - "description": "The label for category secret in sidebar reference" - }, - "sidebar.reference.category.service": { - "message": "service", - "description": "The label for category service in sidebar reference" - }, - "sidebar.reference.category.serviceaccount": { - "message": "serviceaccount", - "description": "The label for category serviceaccount in sidebar reference" - }, - "sidebar.reference.category.sidecar": { - "message": "sidecar", - "description": "The label for category sidecar in sidebar reference" - }, - "sidebar.reference.category.strategy": { - "message": "strategy", - "description": "The label for category strategy in sidebar reference" - }, - "sidebar.reference.category.volume": { - "message": "volume", - "description": "The label for category volume in sidebar reference" - } -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-docs/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-docs/_category_.json deleted file mode 100644 index 97ba09e8..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-docs/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "构建文档", - "position": 2 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-docs/build-docs.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-docs/build-docs.md deleted file mode 100644 index 6ccf4e1b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-docs/build-docs.md +++ /dev/null @@ -1,90 +0,0 @@ -# 构建文档 - -Kusion 的文档采用 [Docusaurus](https://docusaurus.io/) 框架构建,Docusaurus 是基于 React 构建的站点生成器。在构建之前先安装 [Node.js 16+](https://nodejs.org)。 - -文档仓库:https://github.com/KusionStack/kusionstack.io - -## 1. 克隆仓库 - -然后克隆文档仓库到本地: - -``` -$ git clone git@github.com:KusionStack/kusionstack.io.git -``` - -Markdown 格式的文档主要在 docs 和 blog 两个目录,目录对应的内容说明如下: - -- `/docs` - 文档根目录 -- `/docs/user_docs` - 使用文档,针对 Kusion 使用者 -- `/docs/develop` - 开发文档,针对 Kusion 项目本身开发和完善 -- `/docs/referece` - 参考手册,工具、语言、模型的参考 -- `/docs/governance` - 治理,开源社区、路线规划等 -- `/blog` - 博客文章 - -## 2. 本地预览 - -预览和构建之前需要先执行 `npm install` 命令安装 Node.js 依赖的包,然后执行 `npm run start` 命令启动本地预览: - -``` -$ npm install -$ npm run start - -> website@0.1.0 start -> docusaurus start - -[INFO] Starting the development server... -[SUCCESS] Docusaurus website is running at http://localhost:3000/. 
- -✔ Client - Compiled successfully in 3.84s - -client (webpack 5.69.1) compiled successfully - -█ -``` - -该命令会通过默认浏览器打开 http://localhost:3000 页面。左上角的导航栏有:使用文档、开发文档、内部文档、参考手册、治理和博客按钮,分别对应前文对应的目录。右上角对应多语言、文档仓库和主题切换按钮。主体页面是 Kusion 一句话简介和快速开始的按钮链接,下面是 KCL 配置语言、Kusion 模型库和 Kusion 引擎的介绍。 - - -## 3. 构建发布 - -同样需要先执行 `npm install` 命令安装 Node.js 依赖的包(至少执行一次),然后通过 `npm run build` 构建最终的页面资源: - -``` -$ npm run build - -> website@0.1.0 build -> docusaurus build - -[INFO] [zh-CN] Creating an optimized production build... - -█ -``` - -构建是会有更严格的检查,比如内部的坏链接会输出红色的错误信息、橘黄色输出警告信息。对于测试测试,如果遇到比较多的坏链接,可以先将 `docusaurus.config.js` 文件中的 `onBrokenLinks` 和 `onBrokenMarkdownLinks` 设置为 `"ignore"` 关闭。产生的文件输出到 `build` 目录,该目录可以直接部署发布。 - - -## 4. 配置文件 - -配置文件有文档配置、侧边栏和内部文档几个: - -- [docusaurus.config.js](https://github.com/KusionStack/kusionstack.io/blob/main/docusaurus.config.js) 是 [Docusaurus](https://docusaurus.io/) 的主配置文件。 -- [sidebars.js](https://github.com/KusionStack/kusionstack.io/blob/main/sidebars.js) 对应文档的侧边栏配置,被 [docusaurus.config.js](https://github.com/KusionStack/kusionstack.io/blob/main/docusaurus.config.js) 文件引用。 - -## 5. 主页面内容 - -主页面内容由以下文件构建产生: - -- [docusaurus.config.js](https://github.com/KusionStack/kusionstack.io/blob/main/docusaurus.config.js) 是 [Docusaurus](https://docusaurus.io/) 的主配置文件,包含顶部的导航栏和底部的链接。 -- [src/pages/index.js](https://github.com/KusionStack/kusionstack.io/blob/main/src/pages/index.js) 对应页面主体区域,包含快速开始的链接按钮。 -- [src/components/HomepageFeatures.js](https://github.com/KusionStack/kusionstack.io/blob/main/src/components/HomepageFeatures.js) 对应 Kusion 的特性介绍。 - -## 6. 内部链接 - -网址内部的相对链接可以通过 Markdown 文件的相对路径映射,比如 [`/docs/develop/build-docs`](https://github.com/KusionStack/kusionstack.io/docs/develop/build-docs) 文件中可以通过 [`/docs/user_docs/intro/kusion-intro`](/docs/user_docs/intro/kusion-intro) 绝对路径或 [`../repos`](../repos) 相对路径引用。 - -注意:目录内部的 `index.md` 或与目录同名的 Markdonwn 文件对应目录链接的页面。 - -## 7. 
更新模型文档 - -Konfig 中的 [模型文档](/docs/reference/model) 是从 KCL 代码,通过 [docgen](/docs/reference/cli/kcl/docgen) 工具自动提取产生(比如自动生成的 [ConfigMap](/docs/reference/model/kusion_models/kube/frontend/configmap/doc_configmap) 模型文档)。如果希望完善模型文档,首先需要到 [Konfig 代码仓库](https://github.com/KusionStack/konfig) 添加或更新文档,然后重新生成文档(目前还不能自动同步,有兴趣的用户可以尝试 [提供帮助](/docs/governance/contribute/contribute-docs))。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/_category_.json deleted file mode 100644 index c33b8244..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "构建 Kusion", - "position": 2 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/build-from-source.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/build-from-source.md deleted file mode 100644 index 7904e07e..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/build-from-source.md +++ /dev/null @@ -1,3 +0,0 @@ -# 构建 KusionStack - -当用户希望自己修改底层代码并查看效果时,希望从代码构建 KusionStack 工具。从代码构建不仅仅是开发 KusionStack 项目的必要条件,也是正常二进制打包和发布的必经流程。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/docker.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/docker.md deleted file mode 100644 index 6871f420..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/docker.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Docker 和 Ubuntu 环境 - -KusionStack 主要工具以 Rust、Golang 等语言为主开发,KCL 语言通过 Python 提供一些扩展插件,同时还需要依赖 Git、Makefile 等工具。为了方便配置开发环境,我们提供了基于 ubuntu:20.04 的 Dockerfile 配置文件:https://github.com/KusionStack/KCLVM/blob/main/Dockerfile 。用户可以基于该自行构建镜像,也可以通过 `docker pull kusionstack/kclvm-builder-ubuntu` 命令拉取镜像。 - -如果是本地的 Ubuntu 环境,可以参考 Dockerfile 文件的命令安装依赖环境。 - -注意: -1. 本地除了 Docker 之外,还需要有 Bash 和 GMake 等工具 -2. macOS m1 系统对 Docker 的支持还有待完善,构建时可能遇到阻塞等问题 - -## 1. 构建 KCLVM - -KCLVM 是 Kusion 中 KCL 配置语言的实现,通过以下命令克隆 KCLVM 代码到一个新的目录: - -``` -$ git clone git@github.com:KusionStack/KCLVM.git -``` - -然后在命令行环境切换到 KCLVM 代码根目录,执行以下命令: - -``` -$ make sh-in-docker -root@ubuntu:~/kclvm# pwd -/root/kclvm -``` - -以上命令会将宿主机器当前的 KCLVM 目录映射到容器中的 `/root/kclvm` 目录,同时进入 Bash 环境。 - -然后通过 `run.sh` 脚本构建 CPython: - -``` -root@ubuntu:~/kclvm# ./run.sh -1) build 3) build-kclvm 5) test 7) lint-check 9) release-arm64 -2) build-cpython 4) update-kclvm 6) format 8) release -Please select the action: 2 -... -``` - -选择 2 进行 CPython 构建,构建时间几十分钟不等。构建的结果在 `/root/kclvm/_build/dist/ubuntu/cpython` 目录,CPython 只需要构建一次。 - -然后是构建 KCLVM 的 Python 和 Rust 版本,同时安装依赖的包(包括依赖的插件等): - -``` -root@ubuntu:~/kclvm# ./run.sh -1) build 3) build-kclvm 5) test 7) lint-check 9) release-arm64 -2) build-cpython 4) update-kclvm 6) format 8) release -Please select the action: 3 -... -``` - -构建结果在 `/root/kclvm/_build/dist/ubuntu/kclvm` 目录,其中插件在 `plugins` 子目录,二进制程序在 `bin` 子目录。将 `/root/kclvm/_build/dist/ubuntu/kclvm/bin` 目录添加到 `PATH` 环境变量,然后输入 `which kcl` 或 `kcl -h` 测试 KCL 命令行。 - -然后编译执行 `/root/kclvm/hello.k` 配置程序: - -``` -root@ubuntu:~/kclvm# kcl hello.k -name: kcl -age: 1 -two: 2 -x0: - name: kcl - age: 1 -x1: - name: kcl - age: 101 -``` - -一切正常就说明构建成功了。 - -## 2. 
构建 kclvm-go 和 kcl-go - -kclvm-go 是基于 KCLVM 命令包装的 Go 语言 SDK,上层的 Kusion 命令也是通过 `kclvm-go` 使用 KCLVM 的功能。`kcl-go` 是基于 `kclvm-go` SDK,采用 Go 语言实现了一个命令行工具,其中包含 KCL 语言的 Playground 和单元测试等功能。 - -在 Docker 镜像中已经安装了 Go 版本,可以通过以下命令查看。 - -``` -root@ubuntu:~/kclvm# go version -go version go1.16.3 linux/amd64 -``` - -克隆 `kclvm-go` 仓库: - -``` -root@ubuntu:~/kclvm# cd -root@ubuntu:~# git clone git@github.com:KusionStack/kclvm-go.git -``` - -然后执行 `kclvm-go/examples/hello/main.go`: - -``` -root@ubuntu:~# cd kclvm-go -root@ubuntu:~/kclvm-go# go run ./examples/hello -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl -``` - -测试程序正常运行说明 `kclvm-go` 的构建已经成功了。现在可以执行更复杂的 `kcl-go` 命令: - -``` -root@ubuntu:~/kclvm-go# go run ./cmds/kcl-go -NAME: - kcl-go - K Configuration Language Virtual Machine - -USAGE: - kcl-go - kcl-go [global options] command [command options] [arguments...] - - kcl-go kcl -h - kcl-go -h -... -``` - -也可以通过 `kcl-go` 命令行执行 `kclvm-go/hello.k`: - -``` -root@ubuntu:~/kclvm-go# go run ./cmds/kcl-go run hello.k -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl -``` - -或者通过 `go run ./cmds/kcl-go play` 启动 Playground 服务,然后浏览器打开 http://127.0.0.1:2021 页面测试。 - -## 3. 构建 Kusion 命令 - -Kusion 是更上层的工具集合,其核心命令是采用 Go 语言实现,底层和 KCLVM 的交互是通过 `kclvm-go` 包完成。 - -克隆 Kusion 仓库: - -``` -root@ubuntu:~/kclvm# cd -root@ubuntu:~# git clone git@github.com:KusionStack/kusion.git -``` - -然后执行 `kusion/cmds/kusionctl` 程序: - -``` -root@ubuntu:~/kusion# go run ./cmd/kusionctl -kusion 作为云原生可编程技术栈,通过代码管理 kubernetes 集群。 -... -``` - -正常情况可以看到 kusion 命令的帮助信息。 - -## 4. KCLOpenapi - -KCLOpenapi 是 KCL 语言版本的 OpenAPI 工具,完整采用 Go 语言实现。因此可以在任何一个安装了 Go 1.16+ 的环境编译。 - -``` -$ git clone git@github.com:KusionStack/kcl-openapi.git -$ cd kcl-openapi -$ go run ./cmd/swagger -h -Usage: - swagger [OPTIONS] - -Swagger tries to support you as best as possible when building APIs. - -It aims to represent the contract of your API with a language agnostic -description of your application in json or yaml. -... -``` - -正常可以看到帮助信息。 - -## 5. 其它 - -KCLVM、KusionCtl 是本地开发需要经常构建的仓库,目前因为开发资源和时间的原因导致开发文档还不够完善,希望社区同学多多反馈共同参与完善。此外还有 VSCode 插件等外围工具的构建,用户可以参考仓库内部实现代码和文档操作。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/windows.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/windows.md deleted file mode 100644 index 200c5748..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/build-from-source/windows.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Windows/X64 环境 - -Windows 是最流行的桌面系统,有着庞大的用户群体,但是默认缺少完整的开发环境。如果需要在 Windows 下构建 KusionStack 工具,首先需要安装开发环境。 - -假设是 Windows/X64 环境,首先安装以下命令: - -1. 安装 VC2019,确保,默认的 C++ 工具都已经安装好 - - https://visualstudio.microsoft.com/zh-hans/downloads/ -1. 安装 Rust 和 cargo - - https://forge.rust-lang.org/infra/other-installation-methods.html - - 安装 x86_64-pc-windows-msvc 版本,配套 MSVC 编译器(待确认) -1. 安装 Go1.16+,必要时可以根据网络环境配置代理服务 - - https://go.dev/dl/ -1. 安装 TDM-GCC-x64 工具 - - https://jmeubank.github.io/tdm-gcc/download/ -1. 安装 LLVM-12.0.1-win64 - - https://github.com/PLC-lang/llvm-package-windows/releases/tag/v12.0.1 - - 设置 `LLVM_SYS_120_PREFIX` 和 `LLVM_SYS_70_PREFIX` 环境变量为安装的目录 -1. 打开 VS2019-x64 命令行 - -## 1. 
构建 KCLVM - -KCLVM 是 Kusion 中 KCL 配置语言的实现,通过以下命令克隆 KCLVM 代码到一个新的目录,地址:`git@github.com:KusionStack/KCLVM.git`。 - -然后在 VS2019-x64 命令行环境切换到 `KCLVM` 目录执行 `cargo build` 测试 Rust 等环境。 - -然后在 VS2019-x64 命令行环境切换到 `.\Scripts\build-windows` 目录,执行 `build.bat` 批处理脚本进行构建。输出的文件在 `.\Scripts\build-windows\_output\kclvm-windows` 目录。 - -构建成功后通过以下命令测试 KCL 命令: - -``` -_output\kclvm-windows\kclvm.exe -m kclvm ..\..\hello.k -_output\kclvm-windows\kcl-go.exe run ..\..\hello.k -_output\kclvm-windows\kcl.exe ..\..\hello.k -``` - -一切正常就说明构建成功了。 - -## 2. 构建 kclvm-go 和 kcl-go - -kclvm-go 是基于 KCLVM 命令包装的 Go 语言 SDK,上层的 Kusion 命令也是通过 `kclvm-go` 使用 KCLVM 的功能。`kcl-go` 是基于 `kclvm-go` SDK,采用 Go 语言实现了一个命令行工具,其中包含 KCL 语言的 Playground 和单元测试等功能。 - -首先将 `kclvm.exe` 命令所在目录添加到 `PATH` 环境变量,然后重新登陆系统通过 `where kclvm` 命令检查是否可以找到 kclvm 命令。 - -然后克隆 kclvm-go 仓库,地址为:`git@github.com:KusionStack/kclvm-go.git`。然后进入 `kclvm-go` 命令执行以下命令: - -- `go run ./examples/hello` -- `go run ./cmds/kcl-go` - -测试程序正常运行说明 `kclvm-go` 的构建已经成功了。 - -也可以通过 `go run ./cmds/kcl-go run hello.k` 命令行执行 `kclvm-go/hello.k`,输出以下结果: - -```yaml -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl -``` - -或者通过 `go run ./cmds/kcl-go play` 启动 Playground 服务,然后浏览器打开 http://127.0.0.1:2021 页面测试。 - - -## 3. 构建 Kusion 命令 - -Kusion 是更上层的工具集合,其核心命令是采用 Go 语言实现,底层和 KCLVM 的交互是通过 `kclvm-go` 包完成。 - -克隆 Kusion 仓库:`git@github.com:KusionStack/kusion.git` - -然后进入 kusion 目录执行 `go run ./cmd/kusionctl` 命令。正常情况可以看到 kusion 命令的帮助信息。 - -## 4. KCLOpenapi - -KCLOpenapi 是 KCL 语言版本的 OpenAPI 工具,仓库地址:`git@github.com:aKusionStack/kcl-openapi.git`。 - -KCLOpenapi 是纯 Go 语言实现的工具,按照正常的 Go 程序构建流行即可。 - -可以通过 `go run ./cmd/swagger -h` 查看命令的帮助信息。 - - -## 5. 其它 - -KCLVM、KusionCtl 是本地开发需要经常构建的仓库,目前因为开发资源和时间的原因导致开发文档还不够完善,希望社区同学多多反馈共同参与完善。此外还有 VSCode 插件等外围工具的构建,用户可以参考仓库内部实现代码和文档操作。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/_category_.json deleted file mode 100644 index 107c2084..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "设计与实现", - "position": 4 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/design.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/design.md deleted file mode 100644 index 339d9791..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/design.md +++ /dev/null @@ -1,3 +0,0 @@ -# 设计与实现 - -Kusion 内部设计与实现。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/kclvm.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/kclvm.md deleted file mode 100644 index c8da646b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/kclvm.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -sidebar_position: 99 ---- - -# KCLVM 整体架构 - -![](/img/docs/develop/design/kcl-tech-arch.png) - -KCLVM 编译器实现以 Spec 作为驱动(主要包含 KCL 语言规范,KCL 多语言集成规范、KCL OpenAPI 规范),KCL 是一个编译型的语言,整体保持与常规语言编译器相同的三段式架构,并在其中借助了 LLVM-IR 作为 KCL 连接 Native/WASM 代码的中间纽带,主要有以下 3 个核心工作: - -* KCL 代码到 LLVM-IR 的翻译:通过遍历 KCL AST,根据 KCL 语言规范生成相应的 LLVM-IR 代码,相当于用户态代码。 -* KCL Runtime Lib:运行时辅助函数库,提供运行时 KCL 的值/类型计算、内存、上下文管理、内置库和插件库支持,相当于系统态代码。 -* 用户态与系统态代码链接与执行:将用户态代码与系统态代码链接为一个动态链接库/WASM Module,最终通过统一的 Runner 模块执行编译后的 KCL 代码。 - -此外 KCL 在语义检查器和和 Plugin 这块做了增强支持: - -* Resolver - * **静态类型推导与检查**:可以在编译时进行类型推导和检查,避免运行时类型检查开销,可以作为 IDE 插件跳转、补全等功能支持和语义 API 构建(如 schema 模型查询、依赖分析等) 的良好基础 - * 
**配置图合并**:在编译过程对配置数据依赖图进行构建与合并,最终运行时仅进行少量计算即可得到最终的解 - * **语义依赖图**:通过内建语义依赖图,KCL 可以完成配置变更的依赖分析, 并且根据配置变更结果进行增量编译,对不变的配置进行缓存,可以提升端到端编译性能 - * **Schema 为中心的面向对象特性**:KCL 语言只保留了单继承的语法。同时 schema 可以通过 mixin 和 protocol 等特性混入复用相同的代码片段,对于不同的能力配套,可以通过 mixin 机制编写,并通过 mixin 声明的方式“混入”到不同的结构体中 -* Plugin: 可以使用 Python/Go 编写扩展库,主要包含一些领域能力,如访问网络或数据库等。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/konfig.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/konfig.md deleted file mode 100644 index ddb7eec5..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/design/konfig.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Konfig 大库结构 - -本文主要解释 Konfig 配置大库的目录和代码结构,其中涉及的基本概念解释可见[《模型概览》](/docs/reference/model/overview)。 - -## 1. 整体结构 - -```bash -. -├── Makefile # 通过 Makefile 封装常用命令 -├── README.md # 配置大库说明 -├── appops # 应用运维目录,用来放置所有应用的 KCL 运维配置 -│ ├── guestbook-frontend -│ ├── http-echo -│ └── nginx-example -├── base # Kusion Model 模型库 -│ ├── examples # Kusion Model 样例代码 -│ │ ├── monitoring # 监控配置样例 -│ │ ├── native # Kubernetes 资源配置样例 -│ │ ├── provider # 基础资源配置样例 -│ │ └── server # 云原生应用运维配置模型样例 -│ └── pkg -│ ├── kusion_kubernetes # Kubernetes 底层模型库 -│ ├── kusion_models # 核心模型库 -│ ├── kusion_prometheus # Prometheus 底层模型库 -│ └── kusion_provider # 基础资源 底层模型库 -├── hack # 放置一些脚本 -└── kcl.mod # 大库配置文件,通常用来标识大库根目录位置以及大库所需依赖 -``` - -## 2. 核心模型库结构 - -核心模型库一般命名为 kusion_models,主要包含前端模型、后端模型、Mixin、渲染器等,目录结构为: - -```bash -├── commons # 基础资源核心模型库 -├── kube # 云原生资源核心模型库 -│ ├── backend # 后端模型 -│ ├── frontend # 前端模型 -│ │ ├── common # 通用前端模型 -│ │ ├── configmap # ConfigMap 前端模型 -│ │ ├── container # 容器前端模型 -│ │ ├── ingress # Ingress 前端模型 -│ │ ├── resource # 资源规格前端模型 -│ │ ├── secret # Secret 前端模型 -│ │ ├── service # Service 前端模型 -│ │ ├── sidecar # Sidecar 容器前端模型 -│ │ ├── strategy # 策略前端模型 -│ │ ├── volume # Volume 前端模型 -│ │ └── server.k # 云原生应用运维前端模型 -│ ├── metadata # 应用运维的元数据模型 -│ ├── mixins # 统一放置可复用的 Mixin -│ ├── render # 渲染器,把前后端模型联系在一起的桥梁 -│ ├── templates # 静态配置 -│ └── utils # 工具方法 -└── metadata # 通用元数据模型 -``` - -## 3. 
Project 和 Stack 结构 - -Project 和 Stack 的基本概念可见 [《Project & Stack》](/user_docs/concepts/konfig.md)。 - -Project 在配置大库的应用运维(appops)场景中对应的概念是「应用」,Stack 对应的概念是「环境」,更多映射关系可见[《映射关系》](/user_docs/guides/organizing-projects-stacks/mapping.md)。 - -本节以应用「nginx-example」为例,介绍 Project 和 Stack 在配置大库中的基本目录结构: - -```bash -├── README.md # Project 介绍文件 -├── base # 各环境通用配置 -│ └── base.k # 通用 KCL 配置 -├── dev # 环境特有配置 -│ ├── ci-test # 测试目录 -│ │ ├── settings.yaml # 测试数据 -│ │ └── stdout.golden.yaml # 测试期望结果 -│ ├── kcl.yaml # 多文件编译配置,是 KCL 编译的入口 -│ ├── main.k # 当前环境 KCL 配置 -│ └── stack.yaml # Stack 配置文件 -└── project.yaml # Project 配置文件 -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/kep.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/kep.md deleted file mode 100644 index c1292ae5..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/kep.md +++ /dev/null @@ -1,5 +0,0 @@ -# KEP 提案 - -KEP 是 KCL 语言增强建议(KCL Enhancement Proposals)的缩写。一个 KEP 是一份为 KCL 社区提供各种增强功能的技术规格,也是提交新特性,以便让社区指出问题,精确化技术文档的提案。可以参考 [KEP-1](https://github.com/KusionStack/KEP/blob/main/keps/KEP-0001.md) 文件格式书写。 - -KEP 仓库:https://github.com/KusionStack/KEP diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/repos.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/repos.md deleted file mode 100644 index fbebbee7..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/develop/repos.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -sidebar_position: 1 ---- - -# 仓库结构 - -## 1. 主要仓库 - -Kusion 的顶级仓库包含 Kusion 主仓库、Konfig 模型仓库、文档仓库等、IDE 扩展仓库、KCLVM 相关仓库等,关系如下图: - -![](/img/docs/develop/repos/repo-dag-01.png) - -- Kusion 主库:https://github.com/KusionStack/kusion -- Kusion 网站仓库:https://github.com/KusionStack/kusionstack.io -- Kusion 模型库:https://github.com/KusionStack/konfig -- KCLVM 主库:https://github.com/KusionStack/KCLVM -- IDE 扩展仓库:https://github.com/KusionStack/vscode-kcl - -## 2. 文档仓库 - - -文档相关的仓库关系如下: - -![](/img/docs/develop/repos/repo-dag-docs.png) - -文档主要包含网址的文档、相关的案例代码文档、语言规范文档和 Kusion 模型库自带的文档等。 - -- 文档主仓库:https://github.com/KusionStack/kusionstack.io -- 电子书:https://github.com/KusionStack/kusion-in-action-book - -## 3. 
KCLVM 仓库 - -其中 KCLVM 相关仓库是 KCL 配置语言、规范、工具等实现的仓库,其展开的子仓库关系如下: - -![](/img/docs/develop/repos/repo-dag-02.png) - -最上面提供 KCLVM 实现的多语言绑定接口,目前主要提供 Go、Python 等绑定,后续计划提供 Java、NodeJS 等更多的语言绑定。中间部分是 KCL 语言的实现和语言规范部分。此外、还有 KCL 语言的插件和配套的 IDE 插件等。 - -- KCLVM 主库:https://github.com/KusionStack/KCLVM -- kclvm-go:https://github.com/KusionStack/kclvm-go -- KCL 插件:https://github.com/KusionStack/kcl-plugin diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/events/2022/glcc.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/events/2022/glcc.md deleted file mode 100644 index 9c7d1dce..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/events/2022/glcc.md +++ /dev/null @@ -1 +0,0 @@ -# GitLink Code Camp diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/events/2022/kcl_paper.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/events/2022/kcl_paper.md deleted file mode 100644 index 1126ca4b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/events/2022/kcl_paper.md +++ /dev/null @@ -1,13 +0,0 @@ -# KCL 论文被 SETTA 2022 会议录用 - -近日,由 KusionStack 团队成员撰写的关于 KCL 创新论文被 SETTA 2022 国际会议长文录用。 - -![](/img/docs/events/2022/kcl_paper_setta.png) - -Symposium on Dependable Software Engineering(以下简称 SETTA)可靠软件工程研讨会旨在将国际研究人员聚集在一起,就缩小形式化方法与软件工程之间的差距交流研究成果和想法。例如,将形式化技术和工具应用于工程大型系统(如网络物理系统 (CPS)、物联网 (IoT)、企业系统、基于云的系统等)。 - -此次被录用的论文为《KCL: A Declarative Language for Large-scale Configuration and Policy Management》,该论文的核心创新点是提出了 KCL 声明式语言、开发机制以及一致的工作流程。通过语言的建模及约束能力,可以提升运维开发过程中的多团队协作生产力以及效率,同时确保大规模配置和策略管理的稳定性。 - -此外,SETTA 2022 将在北京时间 10 月 27 日至 10 月 28 日举办线上会议,届时会分享 KCL 论文详细内容,欢迎加入 [KusionStack 社区](https://github.com/KusionStack/community) 进行围观。SETTA 2022 会议议程详情请参考:https://lcs.ios.ac.cn/setta2022/program.php。 - -注:目前 KCL 已在 Github 开源,欢迎访问 https://github.com/KusionStack/KCLVM 获得更多信息。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/_category_.json deleted file mode 100644 index 131bb8e3..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "贡献指南", - "position": 4 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute-code.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute-code.md deleted file mode 100644 index fa859435..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute-code.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -sidebar_position: 2 ---- - -# 如何贡献代码 - -欢迎参与 Kusion 共建贡献完善代码、完善代码文档和测试,同时也欢迎通过 Issue 提供反馈。本文主要针对修改和完善已有的代码,如果是要增加 Konfig 新模型请先通过 Issue 或讨论中充分讨论,如果是希望增加 KCL 语言请通过 [KEP](/docs/develop/kep) 流程提交。 - -## 1. 代码和注释中的错别字 - -如果只是修改代码和注释中的错别字,不涉及代码逻辑的调整,那么可以直接在 Github 克隆仓库后直接修改并提交 PR。需要注意的是尽量保持代码风格一致。 - -## 2. 如何贡献 Kusion 模型库代码 - -- 先确保本地测试环境正常 -- 修改代码并补充测试 -- 本地测试通过后提交 PR - -## 3. 如何贡献 KusionCtl 代码 - -- 先确保本地测试环境正常 -- 修改代码并补充测试 -- 本地测试通过后提交 PR - -## 4. 如何贡献 KCLVM 代码 - -- 先确保本地测试环境正常 -- 修改代码并补充测试 -- 本地测试通过后提交 PR - -## 5. 如何贡献 VS Code 插件代码 - -请参考 VS Code 插件仓库的相关文档 - -## 6. 
开发流程相关代码 - -欢迎通过 Issue 和讨论组讨论。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute-docs.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute-docs.md deleted file mode 100644 index b512c511..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute-docs.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -sidebar_position: 1 ---- - -# 如何贡献文档 - -本文主要针对已有的文档做局部修改。如果是投稿博客文章、添加新的文档或者调整文档目录结构请先联系团队成员。 - -Kusion 文档分为用户指南、开发文档、内部文档、参考手册和博客文章等,他们的区别如下: - -- 用户指南:对应使用文档,是让用户以最小的代价快速使用 Kusion 工具完整工作,不要涉及太多的内部原理和实现 -- 开发文档:内部是怎么实现的,主要针对希望了解 Kusion 原理和参与贡献和开发的同学 -- 内部文档:针对企业用户的一些内部场景定制的文档 -- 参考手册:Kusion 工具、KCL 配置语言、模型库 等全部特性的文档,内容覆盖最广但比较琐碎 -- 博客文章:没有特别的限制,可以是针对某些具体的场景、某些技术点或者是整体发展展望等分享文章 - -在贡献不同类型的文档时,最好能够结合上面的定位对不同的内容做一些适当的裁剪,给读者最佳体验。 - -## 1. 基本规范 - -- 除标题外,内部小标题尽量带编号,便于阅读 -- 工具自动输出的文档需要由到源代码的链接,小标题可以不带编号 -- 尽量不要贴大段的代码(30行以内),代码最好给出文字解释和对应的参考链接 -- 有图有真相,但是不推荐过度复杂的架构图 -- 内部链接:采用 [`/docs/user_docs/intro/kusion-intro`](/docs/user_docs/intro/kusion-intro) 绝对路径形式 - -**标点和空格** - -- 在中文的文档中优先使用中文的标点 -- 中文和英文之间需要增加 1 个空格 -- 中文和数字之间需要增加 1 个空格 -- 中文使用全角标点,标点前后均不添加空格 -- 英文内容使用半角标点,标点后面加 1 个空格 -- 链接前后需要保留一个空格,但是段落开头和中文全角标点附近不用添加空格。 - -**图片和资源文件名** - -- 文件名和目录名只能用数字、英文字母、下划线 `_` 和减号 `-` 组成 -- 当前文档的图片放在当前目录的 images 目录下 -- 矢量图片可以通过 [drawio 离线版](https://github.com/jgraph/drawio-desktop/releases) 绘制(并同时提交源文件),以 200% 分辨率导出 png 格式图片 - -## 2. 使用文档内容的基本模式 - -每个使用文档可以看作是一个相对完整的分享或博客文章(参考手册不再此类)。使用文档遵循以下模式组织内容: - -1. 概览:本文希望解决什么问题,达到什么效果,可以先放最终效果截图 -1. 依赖的环境:需要安装什么工具,并给出相关链接 -1. 引入本文构建资源的关系图或架构图 - - 需要用到的 Konfig 模型,给出模型参考页面链接,以及对应的上游原始模型的文档链接 -1. 具体的操作步骤 - - 尽量确保最小化代码,甚至可以刻意隐藏一些干扰代码,同时给出完整代码对应的链接 - - 列出每个步骤命令的概要输出信息,并配以文字描述 -1. 给出测试方式 - - 尽量采用社区通用的方式(比如kube、curl命令、或浏览器)测试 - - 给出测试结果的截图(和开头呼应) -1. 总结和展望 - - 简单回顾当前操作的流程,以及一些可以展开的地方(可以给出一些链接) - -## 3. 测试和提交 PR - -先克隆文档仓库,本地通过 `npm run start` 和 `npm run build` 命令测试查看效果,可以参考 [构建文档](/docs/develop/build-docs)。确保可以正常浏览后提交 PR 即可。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute.md deleted file mode 100644 index f2fa7d4d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/contribute.md +++ /dev/null @@ -1,3 +0,0 @@ -# 贡献指南 - -如果是第一次参与开源社区,可以先查看 Issue 和 PR(PullRequest)中相似的修改。然后针对自己的问题和社区或开发团队同学充分讨论,也可以同创建 Issue 的方式反馈遇到的问题,然后就可以针对相关的 Issue 提供代码补丁。刚开始可以从文档完善和局部代码改进开始,比如:文档、Examples、多语言 Binding等。同时这对希望深入参与的同学能够针对语言功能、语言测试、编程框架、多种后端等核心特性做贡献。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/docs/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/docs/_category_.json deleted file mode 100644 index 00663568..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/docs/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "文档规范", - "position": 4 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/git-guideline.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/git-guideline.md deleted file mode 100644 index 496abcdb..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/git-guideline.md +++ /dev/null @@ -1,119 +0,0 @@ -# Git 提交指南 - -本文介绍了 Git 提交变更时需要注意的事项,如果拒绝接受本文的内容会导致提交的变更无法被接受。 - -## 1. 
关于 issue - -在提交一个 issue 之前,请先查阅已经关闭的 issue ,也许在关闭的 issue 中已经存在合适的解决方案。 - -如果没有找到合适的方案,我们提供了4种模版在创建 issue 的时候使用。 -- Bug Report : 发现了一个 Bug,可以通过 Bug Report 模版创建 issue 与我们联系。 -- Enhancement : 开发者对工具进行了增强,可以通过 Enhancement 模版创建 issue 来介绍增加的内容。 -- Feature Request : 在使用的过程中想要为工具增加某些新的特性或者功能,可以通过 Feature Request 模版创建 issue 来描述新特性。 -- Ask a Question : 如果有任何的疑问,可以通过 Ask a Question 模版来创建一个 issue 与我们联系。 - -在选择合适的模版后,只需要填写模版上的要求填写的内容即可。如果在创建 issue 的时候发现没有模版,或者模版内容为空,可以通过微信群,钉钉群或者邮件向我们反馈这个问题。 - -## 2. 关于 Git 分支 - -要向 KusionStack 贡献代码,您必须拥有一个 GitHub 帐户,以便您可以将代码推送到您自己的 KusionStack 分支并创建拉取请求。我们推荐参考 [Angular 规范](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines) 为您自己的分支命名。 -推荐的格式如下: - -``` -{type}-{a_short_description} -``` -分支名称主要包括两个字段,并通过 “-” 分割。其中: - - {type} : 当前分支内容的类型。 - - {a_short_description}: 一个简短的描述,介绍这个分支的主要内容。 - -e.g. 张三首先 Fork 仓库到自己账户下,然后创建对应名称 `zhangsan:fix-output-fmt-bug` 的分支(冒号之前是张三的账号),用于修复输出格式化 bug。 - -## 3. 关于 Git Commit -我们参考 [Commitizen](https://github.com/commitizen/cz-cli) 书写 Commit Message。 -``` -注: 如果直接使用 Commitizen 生成 Commit Message,需要注意因为 Commitizen -是开发人员管理 commit 的工具,与项目本身无关联,因此由 Commitizen 生成的中间产物 -(如: node_modules 文件目录)可能没有在项目 .gitignore 文件中。 - -您可以 git add {filename} 选择要提交的文件而忽视中间产物。 -或者您可以向 .gitignore 文件中添加如下内容而自动忽视中间产物: -# commitizen -package.json -package-lock.json -node_modules/* -``` -如果手动编写 Commit Message,我们也建议采用 [Commitizen](https://github.com/commitizen/cz-cli) 的 commit message 格式。 - -``` -{type} ( {component_or_file} ) {a_short_description} - {a_longer_description} - BREAKING CHANGE: {breaking_change_description}. - {linked issue} -``` - -其中主要包括6个字段: - - {type} : 当前 commit 对应的分支的类型。 - - {component_or_file}: 当前 commit 改动的模块或者文件的名称。 - - {a_short_description}: 简短的描述介绍 commit 中的内容。 - - {a_longer_description}: 详细的描述用来介绍 commit 中的内容。 - - {breaking_change_description}: 如果 commit 中包含破环兼容性的改动,需要对兼容性改动产生的影响进行介绍。 - - {linked issue}: 与当前这个 commit 关联的 issue。 - - 其中 {breaking_change_description} 和 {linked issue} 如果 commit 中不包含破坏兼容性的改动和关联的 issue,可以省略。 - - e.g. 张三在分支 `zhangsan:fix-output-fmt-bug` 中创建的 commit。 - ``` - - fix(kclvm-printer): fix an output format bug in kclvm-printer - - There is an output format bug in kclvm-printer because ..., - So, The calling of method "XXX" is replaced by "HHHH"..., - ... - - -- 如果没有破坏兼容性的改动和关联的 issue 可以省略下面内容。 - - BREAKING CHANGE: This change maybe cause ....... - - fix #123 - - ``` - -## 4. 关于 pull request - -在提交一个 PR 之前,可能需要优先考虑以下几个问题: -- 请先查阅已经关闭的 PR ,也许在已经关闭的 PR 中,可能存在已经完成的解决方案。 -- 我们建议在提交变更之前,提交一个对应的 issue 描述变更中将要解决的问题,并将变更对应的 PR 与 issue 关联。 -- 在向我们提交 PR 之后,请签署 [Contributor License Agreement(CLA)](#cla) ,如果拒绝签署,我们将无法接受 PR。 -- 请确保每次改动都创建了一个新的分支,并根据上文中提到的规范为分支命名。 -- 一次 PR 请不要超过两个 commit ,请将多余的 commit 通过 squash 压缩,并根据上文中提到的规范,编写 commit message 。 -- 我们提供了 [PR 模版](https://github.com/KusionStack/.github/blob/main/.github/PULL_REQUEST_TEMPLATE.md),只需要添加模版中要求的内容即可,如果在创建PR时发现没有模版或者模版内容为空,可以通过微信群,钉钉群或者邮件向我们反馈这个问题。 - -我们建议PR的标题与分支名、commit message 风格保持一致: -``` -{type} ( {component_name_or_file_name} ) :{a_short_description} -``` - -e.g. 张三为分支`fix/zhangsan/fix_output_fmt_bug`创建的PR名称。 -``` -fix(kclvm-printer): fix an output format bug in kclvm-printer. -``` - -## 5. 
目前 type 支持的类型 -参考[ Angular 规范](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines),type 支持类型的类型如下: -``` -- feat: -- 添加了新的功能特性。 -- fix: -- 进行了 Bug 的修复。 -- docs: -- 进行了文档部分的修改。 -- style: -- 对代码格式的修改,并不影响代码的功能,如:删除多余空格,代码缩进等。 -- refactor: -- 在不改变代码功能的基础上对代码进行了的重构。 -- perf: -- 对代码进行了性能优化。 -- test: -- 添加或者调整已有的测试用例。 -- build: -- 对构建系统或者外部依赖库进行了调整。 -- ci: -- 调整了 CI 的配置文件或者脚本。 -- chore: -- 对源代码和测试文件之外其他部分的调整。 -- revert: -- 对 commit 进行回滚。 -``` - -## 6. Contributor License Agreement(CLA) - -在第一次向我们提交 PR 之后,在 PR 中的 CLA 检查将会失败并提示签署 CLA。您可以通过自己的账户之间在 PR 回复 "I have read the CLA Document and I hereby sign the CLA" 表示同意签署 CLA,然后手动重启失败的 CLA 检查 Action 即可。当 PR 被成功合并之后将会被锁定不能再修改。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/tasks.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/tasks.md deleted file mode 100644 index 3f9ea4cb..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/contribute/tasks.md +++ /dev/null @@ -1,38 +0,0 @@ -# 新手任务计划 - -为了帮助新同学更好的参与 Kusion 开源项目,我们将提供一些新手任务参考思路,帮助大家成为 Kusion Contributor!并为有价值的贡献者颁发 Kusion 贡献者证书和小纪念品。 - -## 1. 完善文档 - -文档虽然重要,但是不能和代码脱节。Kusion 推崇的是 XaC 的万物皆码的哲学:我们不仅仅通过 Git 来管理网站的 Markdown 文档,还通过直接提取 KCL 代码的方式产出文档。目前有以下文档依然需要大量完善,大家可以从以下任务开始: - -- 寻找文档中错别字、不准确文档、过时文档,提交 Issue 和修改补丁 -- 给 [Konfig](https://github.com/KusionStack/konfig) 补全模型和属性文档,增加使用的例子 -- 将已有的中文文档翻译为英文文档 -- 补充更多的 Example - -## 2. 增加多语言绑定 - -为了方便更多的语言集成 Kusion 工具,我们需要给给多的通用编程语言提供 SDK 绑定。 - -- 为 Node.js 提供 SDK -- 为 Java 提供 SDK - - -## 3. 增加新的 Konfig 模型 - -- 针对 Konfig 缺少的场景,补充对应的模型和文档 - -## 4. 完善测试 - -测试是代码改进和演化的基本保障,可以尝试为以下模块提供更多的测试: - -- 给 Konfig 补充测试代码 -- 给 Plugin 补充测试代码 -- 给 KCL OpenAPI 工具补充测试代码 -- 给 KCLVM 补充内部实现模块测试代码 -- 给 KusionCtl 内部模块补充测试代码 - -## 5. 挑战性任务 - -- KCL 语言:语言功能、语言测试、编程框架、多种后端 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/_category_.json deleted file mode 100644 index e34ac94c..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "开源社区", - "position": 1 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/intro.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/intro.md deleted file mode 100644 index 80dc3689..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/intro.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -sidebar_position: 1 ---- -# 开源社区 - -欢迎访问 [Kusion](/docs/user_docs/intro/kusion-intro) 可编程协议栈开源社区,大家的共同参与是所有开源项目健康成长的动力!参与开源有多种方式,社区同学可以通过 Issue 提交发现的 BUG、可以通过提交 PR 完善文档或修改代码、或者通过提交 KEP 开启新特性讨论,或者跟周边朋友分享布道使用 Kusion 过程中的故事。 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/license.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/license.md deleted file mode 100644 index 82b0e03f..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/license.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -sidebar_position: 99 ---- -# 开源协议 - -Kusion 采用 [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) 协议开源。 - -``` - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2022 The Kusion Authors. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/roadmap.mdx b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/roadmap.mdx deleted file mode 100644 index d85f543b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/roadmap.mdx +++ /dev/null @@ -1,415 +0,0 @@ ---- -sidebar_position: 3 ---- -# 路线规划 - -- 2022.5 - 开源 - -## KCLVM 路线规划 - -KCL 语言目前正处于快速发展的阶段,因此未来一段时间 KCL 将围绕稳定性、易用性和生态扩展的目标继续迭代演进,同时将 KCL 语言用于更多的领域场景不断地进行迭代和发展。 - -### KCLVM 2022 规划 - -KCLVM 2022 路线规划将围绕文档改进、语言改进、编译器改进、工具改进、生态扩展和目标领域增强等内容展开。 - -#### 文档改进 - -KCL 2022 年的部分目标是持续地改进文档(包括所有文档的 KCL 用例和多语言文档构建等),主要内容包含 KCL 规范文档、KCL Tour 文档、KCL 代码实验室、KCL 语言课程、KCL 最佳实践文档和 KCL 编程书籍等。 - -#### 语言改进 - -KCL 语言在 2022 年将完成如下工作,使语言特性到达生产可用稳定状态: - -- 语言功能特性简化:根据更多的场景提炼精简语言特性,如删除非必须的语言特性,增强常用的语言特性。 -- 语言稳定性提升:包含编译时和运行时稳定性两部分:编译时稳定性:支持完备的类型系统,支持反向类型推导等核心能力,进一步提升语言类型完备性与编译时稳定性;运行时稳定性:语言内存管理(垃圾回收,运行时内存泄漏检查),语言调试能力支持。 -- 完备的包管理体系:基于 Github 支持 KCL 包 / 模块的上传、下载、安装与链接等。 -- 约束与策略能力支持:约束能力:语言支持编译时约束检查,提升静态校验能力;策略能力:语言实现 KCL 策略规则校验,重点支持语言策略前端,实现语言规则编写语法,规则逻辑方程求解,规则冲突检查等功能。 -- 资源顺序能力支持:通过 KCL 子语言支持资源顺序顺序依赖声明,包括显式依赖和隐式依赖。 - -#### 编译器改进 - -除了语言功能特性的简化与迭代,KCL 编译器将会在 2022 年逐步完成如下工作: - -- 使用 Rust 逐步重写 Python 代码(主要是 KCL 语言工具和 IDE 插件类代码)。 -- 编译器并行解析、增量编译等特性支持,进一步提升编译器主体的性能。 -- 编译器前端架构演进,使编译器自身具备更好的扩展性,基于抽象语法树 AST 扩展中间 IR 代码以尽可能地进行代码优化,未来逐步支持除 LLVM 更多的语言编译器后端如 BPF 等。 -- 编译器自身能够编译到 WASM,并提供相应的 Javascript 库,支持 KCL 嵌入到浏览器中编译并执行。 -- 基于 KCL IR 的 JIT 编译执行预研与探索 - -#### 生态扩展 - -除了 KCL 语言自身演进,2022 年 KCL 还期望在生态扩展方面得到进一步的增强: - -- KCL 定义 API 规范:作为配置语言,通过 KCL 定义自身的 API/CLI/Plugin 等接口定义,通过半自举的方式加速自身的发展和完善。 -- 多种领域语言的生态扩展:支持更多的领域语言和格式集成,比如 HCL、Jsonnet 和 Protobuf 等语言支持。 -- 完备的多语言 API 构建:支持 KCL 编译,语法检查,自动化查询修改等,序列化 / 反序列化,KCL 代码编写等核心 API 函数,结合社区力量支持 Rust/Python/Java/Go 等多种语言。 - -#### 目标领域增强 - -KCL 目前聚焦于云原生配置策略场景,后续会聚焦在在多种运行时(中间件、容器、服务、网络、浏览器、端等)场景和技术形态(Lang、DSL、WASM 等)的深度集成,进一步提升 KCL 的运行时能力。 - -#### 其他目标 - -- 探索通用领域语言编译器研发框架,支持新领域编程语言的快速研发、构建和上线。 - -### 里程碑 - -KCL 2022 路线规划以季度周期建立里程碑: - -- 2022.5 - - KCLVM 开源 - - KCLVM API - - 基于 C Native API 的多语言和 Restful API - - KCL 语言工具 Rust 代码重写 - - 增删改查工具 - - 格式化工具 - - 测试工具 - - 校验工具 - - IDE - - Rust 实现的 IDE 功能:悬停、跳转、补全 - - Go LSP Server 对接 IDE 功能 - - KCL 完成语言特性简化:语言自身无二义性、逻辑自洽 -- 2022.6 - - KCL 完备的包管理体系 - - KCL 完备的类型系统:支持反向类型推导 - - KCL 完备的多语言 API:支持 Python/Rust/Go/Java 四种语言 API - - KCLVM 支持编译器自身编译到 WASM,可以在浏览器中直接运行 KCL 代码 - - KCLVM 支持增量编译、并发编译 -- 2022.9 - - KCL 约束与策略能力支持:支持语言编译时约束检查,策略编写支持 - - KCL 资源顺序能力支持:支持 KCL 子语言描述资源依赖 - - KCL 多种领域语言的生态扩展:支持 HCL/Jsonnet/Protobuf 生态集成 - - KCL JIT 方式编译执行 -- 2022.12 - - 产出通用领域语言编译器研发框架 - - 多种后端支持:包括多种本地平台支持 (x86, ARM 和 risc5 等),多种 VM 格式支持(WASM 和 BPF 等) - -### 期望社区工作 - -随着 KCL 的开源,我们期望借助社区的力量推动 KCL 自身更好的发展,更好地完成如下工作: - -- 文档改进:对 KCL 文档进行错误修改、内容提升、组织相应的开源课程或书籍资源等; -- 语言改进:在社区使用 KEP 机制推动 KCL 语言本身及语言工具的语法、语义等内容改进; -- 生态扩展:对接更多的多语言 API,如 Java、C++ 等; -- 目标领域增强:使用 KCL 探索更多的目标领域、集成更多的技术生态; - -## KCL CLI 路线规划 - -KCL 语言工具正在快速发展的阶段,未来一段时间将面向功能完备性、易用性和生态扩展的目标继续迭代演进,并围绕 KCL 语言自动化的能力注入更多领域场景。 - -### KCL CLI 2022 规划 - -KCL CLI 2022 路线规划包括功能完备性及易用性提升、性能提升、生态扩展、文档改进和目标领域增强等内容展开。 - -#### 完备性及易用性提升 - -KCL CLI 工具包含 OpenAPI 工具、文档生成、测试、格式化、校验、代码审查、查询修改、源码分析和可视化等,2022 年针对以上功能进一步提升功能完备性,并针对更多领域场景提供自动化工具支持。为此,KCL 语言工具在 2022 年将完成如下工作: - -- 命令行界面易用性提升 - - 更统一和完备的命令行 & API 界面 - - 更友好的报错信息与修正建议 -- OpenAPI 工具:支持云原生 / 非云原生场景下,多语言模型向 KCL 的转换 -- KCL 文档生成工具:完成文档自动生成 & 同步脚本,集成到 Konfig CICD 最佳实践;其他功能提升例如:支持从源代码生成 DocString 脚手架的 DocString 快速编写;生成信息更全面和精确的模型文档;支持对 DocString 内容的检查和自动纠错等 -- KCL 格式化工具:集成到 Konfig CICD 最佳实践;功能提升例如:支持代码块竖向对齐等 -- KCL 代码审查工具:集成到 Konfig 
CICD,提供代码质量保障;功能提升例如:支持除 import 语句外更多的审查项,并支持自动修复; -- KCL 测试工具:集成到 Konfig CICD,提供代码质量保障;功能提升例如:支持基于变更行的精准单元测试、集成测试,支持测试覆盖率统计; -- 源码分析(属性列表分析、依赖分析等)和图形化展示工具 - -#### 文档改进 - -KCL CLI 2022 年期望持续地改进文档,包括: - -- KCL 工具相关规范:包括 KCL DocString 规范,KCL Lint 规范,KCL format 规范等 -- KCL 工具用户指南及示例:主要是面向实践的一站式工具使用手册,通过串联组合多个 KCL 语言工具,演示项目迁移、发布运维 CICD 等场景案例 - -#### 生态扩展:对接社区标准 - -- Lint 结果支持输出到 SARIF 规范; -- 文档生成工具支持输出到 xml,html -- 测试覆盖率支持输出为 xUnit 及 cobertura 规范 - -### 里程碑 - -KCL CLI 2022 规划里程碑如下: - -- 2022.4 - - KCL CLI 开源 - - 文档生成、代码审查、格式化工具达到稳定可用状态,应用于 Konfig CICD - - 测试工具达到可精准测试,应用于 Konfig CICD -- 2022.6 - - 提供稳定完善的自动化迁移工具(云原生 / 非云原生存量模型转换;KCL 项目语言版本升级的自动化迁移),完整串联用户迁移和使用的全流程,并产出典型场景的全流程操作文档。 - - 测试工具支持测试覆盖率统计 - - 提供完备的代码审查工具,产出完善的代码审查规范文档 - - 提供完备的代码分析(属性列表分析、依赖分析等)和图形化展示工具 - - 提供基本可用的代码调试工具 -- 2022.9 - - 完成更多生态标准对接(SARIF,xUnit,cobertura 等) - - 源码分析工具支持:资源顺序可视化工具 - -### 期望社区工作 - -我们期望借助社区的力量推动 KCL CLI 更好地发展,推进和完成如下工作: - -- 文档改进:对 KCL CLI 文档进行错误修改、内容提升,分享 KCL CLI 使用的实践和案例等 -- 功能提升:对以上所列 KCL CLI 工具功能的提升和补充 -- 迁移工具:x2kcl 转换工具 -- 生态扩展:各项工具输出对接社区标准(SARIF,xUnit,cobertura 等) - - -## Kusion CLI 路线规划 - -Kusion CLI 即 Kusion 工具链的简称,是基于 KCL 的 DevOps 工具集合,主要包括 kusionctl、kusionup、kube2kcl、crd2kcl 等,工具链清单见 [《Overview of Kusion CLI》](/docs/reference/cli/kusionctl/overview)。 - -### Kusion CLI 2022 规划 - -Kusion CLI 2022 路线规划将围绕核心能力建设、用户使用体验优化、代码质量、文档改进等目标展开: - -* 核心能力建设:提供更丰富的工具集,增强工具链内核,比如提供 Builtin K8s Provider、Custom Hook 等核心能力,增强围绕 IaC & GitOps 的 CLI 端的能力边界 -* 用户使用体验优化:丰富工具链的 Console UI,提升用户使用体验,提升工具链 Console 界面在各个平台的兼容性,推动界面统一 -* 代码质量:从代码质量、仓库治理、建设质量体系等角度全面提升工具链本身的质量,覆盖率达到 80%+ -* 文档改进:丰富使用文档内容,改进文档生成工具,添加贡献文档等 - -#### 核心能力增强 - -Kusion CLI 在 2022 年将完成以下工作,使工具链从能力边界上满足用户需求: - -- 丰富工具集:提供更丰富的工具集合,从用户接入、安装使用、包管理等角度满足各个场景对工具的需求,具体规划有: - - kusionctl:基于 KCL 的 DevOps 主工具,实现 KCL 配置生命周期的管理 - - kusionup:kusionup 是一个优雅的 kusion 多版本管理工具 - - kube2kcl:从 yaml/kustomize 转换成 KCL 配置的工具 - - crd2kcl:从 crd yaml 转换为 KCL 模型定义的工具 - - tf2kcl:从 tf 转换为 KCL 模型定义的工具 - - openapi2kcl:从 openapi yaml 转换为 KCL 模型定义的工具 - - imageupdater:自动化更新大库指定应用镜像的工具 -- 功能增强 - - 脚手架能力升级,支持从远端仓库拉取脚手架模板 - - 强化云原生场景能力,支持 Watch、Custom Hook、自定义发布顺序等功能 - - Project & Stack 管理 - - KCL 配置包管理,包括打包、分发、部署等功能 - - 提供易于扩展的插件机制 -- 第三方系统集成 - - ArgoCD - - Github Action - - KubeVela - -#### 代码质量 - -Kusion CLI 需要可靠的稳定性和被集成能力,这对 Kusion CLI 的代码质量提出较高的要求,2022 年将从以下几个角度提升 Kusion CLI 整体的代码质量: - -- 代码覆盖率:补充单元测试,提升工具链仓库的代码覆盖率,稳定在 80%+ -- 集成测试:建设 Kusion 工具链的集成测试体系,添加集成测试样例,保证工具链功能的稳定性 -- 仓库治理:丰富 Kusion 工具链代码仓库的持续集成流水线,通过自动化手段保证工具链的质量 -- 代码复用性:优化代码架构,抽象接口,提升复用性和被集成能力 - -#### 用户使用体验优化 - -Kusion CLI 作为用户操作 KCL 配置的 Client 端之一,是用户使用 Kusion 技术栈的入口,且会被其它第三方系统集成,这就需要易于理解和美观的 Console UI 以及足够丰富的指引文档,2022 年将从以下角度提升 Kusion CLI 的用户使用体验: - -- Console UI:优化 Console UI 美观度和接受度,在体现足够信息的同时保证美观,同时提示 Console UI 在各个平台显示的兼容性,以便被第三方系统集成 - -#### 文档改进 - -除了 Kusion CLI 能力和体验的增强,配套的文档和工具也会在 2022 年逐步丰富起来: - -- 用户使用文档:提供工具链中各个工具的使用文档,便于用户查阅 -- 快速开始文档:提供用户友好的快速开始文档 -- 研发文档:提供研发视角的文档指引,主要面向工具链被集成的场景 -- 贡献文档:提供贡献指引文档,说明社区用户如何参与工具链的贡献 -- 文档生成工具:开发文档生成工具,保证大部分使用文档能够自动生成 - -### 里程碑 - -Kusion CLI 2022 路线规划以季度周期建立里程碑: - -![打开路线规划图 (SVG)](/img/docs/governance/intro/Kusion-CLI-Roadmap.svg) - -[打开路线规划图 (SVG)](/img/docs/governance/intro/Kusion-CLI-Roadmap.svg) - -### 期望社区工作 - -随着 Kusion CLI 的开源,我们期望借助社区的力量推动 Kusion 更好的发展,更好地完成如下工作: - -- 文档改进:对 Kusion CLI 文档进行错误修改、内容提升、组织相应的开源课程或书籍资源等; -- 能力增强:在社区使用 KEP 机制推动 Kusion CLI 工具链能力的增强; -- 生态扩展:对接更多的第三方平台,比如 ArgoCD、KubeVela 等; -- 代码质量:帮助提升工具链的稳定性和复用性; - -## Kusion IDE 规划 - -Kusion IDE 工具包含 KCL 语言编写辅助、Kusion 一站式运维、静态代码提示三个主要模块,未来将围绕这些模块进一步完善功能及易用性,并持续建设在领域场景下的产品能力。 - -### Kusion IDE 2022 规划 - 
-#### KCL 语言编写辅助 - -KCL 语言编写辅助模块关注开发者在编写、编译、测试 KCL 代码过程中的体验问题。2022 年重点建设 LSP (Language Server Protocol) 核心能力,提升功能易用性、准确率及性能,并借助生态力量扩展到更多 IDE 平台。 - -- LSP 核心能力建设 - - 重点提升错误提示、智能补全、代码重构等功能的易用性,结合 KCL Cli 能力提供源码分析及可视化展示功能,其余能力包括:编译及测试、格式化、代码审查、文档生成等。 - - 性能提升::优化 LSP 响应耗时:核心功能 3s 内成功率 95%+ - - 脱离容器运行环境:基于 KCLVM WASM,提供可脱离容器运行的在线 IDE 支持 - - 量化工具:建设 LSP 功能的量化统计工具,重点指标包括用户数、用量、各操作成功率和耗时等。 -- IDE 平台支持:围绕 LSP 能力将 KCL IDE 支持扩展到覆盖 Jetbrains 各类产品、VS Code、github.dev 在线 IDE、Vim、Sublime 等主流平台 - -#### Kusion 一站式运维 IDE - -Kusion 一站式运维 IDE 模块关注开发者使用 Kusion 技术栈进行应用配置管理的全生命周期,帮助用户在 Kusion IDE 中一站式完成应用的配置修改、测试、部署、监控的全生命周期的管理。 - -- 项目初始化脚手架:帮助开发者快速创建 Kusion project -- 本地研发支持:支持资源在 minikube、[kind](https://sigs.k8s.io/kind) 等本地 Kubernetes 集群预览、计费估算、下发、观测 -- 对接多云厂商:支持开发者通过在本地登录生产环境,实现配置的预览、编排、下发、观测一站式运维体验 -- 项目视图、编排预览等:基于项目目录和代码分析,支持项目列表和详情视图,生成编排拓扑图预览等 -- 资源状态监控:支持对下发配置及其相关资源的状态跟踪和显示 - -#### 静态代码提示 - -KCL 静态代码提示关注在 Web 浏览器中针对 KCL 代码的阅读、编写提供辅助。2022 年将借助社区力量,扩大对在线代码编辑产品的支持。 - -- Github 产品支持:支持在 Github 网页的代码高亮、跳转、搜索 -- 生态对接:支持流行的高亮库和 Web 编辑器产品,例如 highlight.js, tree-sitter, stack-graphs - -### 里程碑 - -Kusion IDE 2022 规划里程碑如下: - -- 2022.4 - - Kusion IDE 开源,开放 IntelliJ IDEA 及 VS Code 平台的 KCL IDE 插件下载 - - 错误提示、格式化功能达到稳定可用状态,应用于 IntelliJ IDEA、VS Code - - 初步完成量化工具建设,支持 LSP 核心功能成功率和耗时统计 -- 2022.6 - - 支持主流 IDE(IntelliJ IDEA,VS Code,Vim)中的代码编译和测试入口,实现编写 - - 支持 LSP 脱离容器运行环境,应用于 github.dev 等在线 IDE 产品 - - 基于 LSP 支持 KCL 在 Vim 中的编写辅助,Jetbrains 系列产品支持:Goland、PyCharm -- 2022.9 - - 建设 Kusion IDE 一站式运维工具,支持项目初始化脚手架等基础功能,实现本地研发支持的预览和下发 - - 扩展静态代码提示工具生态,覆盖 highlight.js, Monaco, code Mirror, tree-sitter, stack-graphs - -### 期望社区工作 - -我们期望借助社区的力量推动 Kusion IDE 更好地发展,推进和完成如下工作: - -- 文档改进:对 Kusion IDE 文档进行错误修改、内容提升 -- 需求和错误上报:通过上报需求和错误,帮助提升 Kusion IDE 功能的易用性和准确度。我们也欢迎针对领域场景的自定义 IDE 需求。 -- 功能提升:对以上所列 Kusion IDE 工具功能的提升和补充 -- 生态扩展: - - 将 KCL LSP 扩展到更多 IDE 平台,包括且不限于不同 IDE 下的安装及配置指南文档、完善 LSP 在各 IDE 平台的客户端功能、在各 IDE 平台的 icon style 插件中支持 KCL icon 等。 - - 将 KCL 静态代码提示扩展到更多产品(例如 highlight.js, Monaco, code Mirror, tree-sitter, stack-graphs, [difftastic](https://github.com/Wilfred/difftastic) ...) 
- - Kusion IDE 多云厂商的对接 - -## Kusion Model 路线规划 - -Kusion Model 即 Kusion 模型库的简称,是基于 KCL 的模型库集合,主要包括底层模型库、核心模型库,模型库的基本概念可见 [《Overview of Kusion Model》](/docs/reference/model/overview)。 - -### Kusion Model 2022 规划 - -Kusion Model 2022 路线规划将围绕核心模型建设、用户使用体验优化、模型质量、文档改进等目标展开: - -* 核心模型建设:提供更丰富的核心模型,适配应用运维、监控、基础资源等场景 -* 用户使用体验优化:提升模型的易用性,以更简洁的配置清单描述更多场景 -* 模型质量:从模型注释、单元测试、集成测试等角度保证模型质量 -* 文档改进:丰富模型使用文档和 Use Case 文档,改进文档生成工具,添加贡献文档等 - -#### 核心能力增强 - -Kusion Model 在 2022 年将完成以下工作,使模型库从能力边界上满足用户需求: - -- 核心模型建设:提供更丰富的核心模型,适配应用运维、监控、基础资源等场景,具体规划有: - - Server:云原生应用运维模型,围绕着应用运维定义的配置模型 - - Job:一次性任务模型 - - RBAC:围绕着应用权限描述的模型 - -#### 代码质量 - -Kusion Model 是用户界面之一,需要从以下几个方面保证它们的质量: - -- 单元测试:编写 Kusion Model 单元测试,保证渲染结果符合预期 -- 模型注释:编写 Kusion Model 注释,包括简介、属性描述、样例,方便用户使用和工具自动生成文档 -- 集成测试:搭建 Kusion Model 的集成测试框架,端到端的保证模型质量 - -#### 文档改进 - -除了 Kusion Model 核心模型建设和代码质量,配套的文档和工具也会在 2022 年逐步丰富起来: - -- 模型说明文档:提供大而全的模型说明文档 -- 快速开始文档:提供用户友好的快速开始文档 -- 研发文档:提供研发视角的文档指引,主要介绍如何开始开发 Kusion Model -- 贡献文档:提供贡献指引文档,说明社区用户如何参与 Kusion Model 的贡献 -- 文档生成工具:开发文档生成工具,保证大部分模型说明文档能够自动生成 - -### 里程碑 - -Kusion Model 2022 路线规划以季度周期建立里程碑: - -- 2022.5 - - Kusion Model 开源 - - Server 模型支持 - - 提供模型文档生成工具 - - 添加贡献文档 -- 2022.6 - - Job 模型支持 - - RBAC 模型支持 - - 补充模型说明文档 - - 补充模型快速开始文档 - - 补充模型注释 -- 2022.9 - - 补充 Kusion Model 单元测试,保证稳定性 - - 搭建模型集成测试框架 -- 2022.12 - - 产出 Kusion Model 模型开发最佳实践 - -### 期望社区工作 - -随着 Kusion Model 的开源,我们期望借助社区的力量推动 Kusion 更好的发展,更好地完成如下工作: - -- 模型丰富:建设核心模型,对接更多场景 -- 文档改进:对 Kusion Model 文档进行错误修改、内容提升、组织相应的开源课程或书籍资源等; -- 代码质量:帮助提升模型的易用性和稳定性; - -## Kusion Engine 路线规划 -Kusion Engine 负责对 KCL 编译结果的解析和资源下发,主要包含三个部分:资源依赖解析与执行,资源 State 管理,异构 Runtime 管理 - -### Kusion Engine 2022 规划 -Kusion Engine 2022 将围绕核心能力建设、用户使用体验优化和配套文档这几个方面展开 - -#### 核心能力增强 -- 持续建设 Kubernetes 资源运维能力,从 apply、patch、event 等多个方面构建云原生时代的 IaC 引擎,打造更加 Kubernetes Native 的运维体验 -- 构建统一的 State 并发控制能力,任何对接过的存储介质都可以具备并发访问控制的能力 -- 精细化 Resource Lifecycle 管理,可以通过 Hook 机制在 Resource 生命周期里自定义外部扩展 -- 兼容 Terraform 的 provider 生态,可以纳管 Terraform 的 provider 资源 - -#### 用户使用体验优化 -- 错误信息展示优化,让用户更容易定位到错误原因 -- LiveDiff 展示结果优化,敏感信息特殊处理 - -#### 配套文档 -- 持续补充完善用户手册,尽量做到新手可以半小时跑完 demo -- 补充 Kusion Engine 系统架构 -- 补充 Kusion Engine Workflow - -### 里程碑 -- 2022.5 - - Apply 支持三路 merge - - LiveDiff 支持三路 merge - -- 2022.7 - - 兼容 Terraform Provider - - 支持 K8s CRD - -- 2022.9 - - 支持 Watch 能力 - - 精细化 Resource Lifecycle 管理 - -- 2022.12 - - 支持执行过程中暂停、继续能力 - - 全类型资源 Watch 能力 - -- 2023.2 - - LiveDiff 敏感信息隐藏 - - 支持资源配置漂移检查 - -### 期望社区工作 -核心能力还在持续构建阶段,欢迎大家提供新的想法,把 Kusion 打造成云原生时代的 IaC 引擎。2022 年希望社区参与的工作如下: -- 编写引擎相关文档,方便用户理解和快速上手 -- Terraform Provider 生态对接,如 Provider 接口适配、生命周期管理等 -- 精细化 Resource Lifecycle 管理 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/support.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/support.md deleted file mode 100644 index 22cc0cfe..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/support.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -sidebar_position: 1 ---- - -# 寻求帮助 - -Kusion 有诸多爱好者组成的开发者和用户社群。 - -此页中,我们列出了您可参与的 Kusion 相关社群;请参见本节的其他页面以获得额外的线上及线下学习材料。 - -在加入 Kusion 社群前,请先阅读 [参与者公约](https://www.contributor-covenant.org/zh-cn/version/2/0/code_of_conduct),期望所有社区成员都遵守该准则。 - -## 讨论区 - -- 在 Github 提交 issue -- 在 Github 讨论组进行交流 -- 通过 官网、Github、Twitter、微博 等账户获取最新状态 - -## 新功能建议 - -请尽量避免提交新功能的合并请求 (Pull Request),我们可能已有专人正在处理,或有可能此功能已经是我们未来规划的一部分。 总之,在提交新功能之前请先联系我们! 
- -## 最新资讯 - -请关注 Kusion 相关的项目和博客。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/team.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/team.md deleted file mode 100644 index f679964a..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/intro/team.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -sidebar_position: 2 ---- - -# KusionStack 团队 - -## 1. 活跃团队 - -Kusion 团队致力于 KCL 语言、KusionCtl、Kusion 模型库、IDE 插件等核心功能以及 Kusion 文档网站的开发和完善。按字母顺序列出了 Kusion 团队的现有成员。 - -## 2. 荣誉开发者 - -Kusion 的发展离不开为其做出巨大贡献的人们,感谢你们。 - -## 3. 学生开发者 - -一部分学生于在校、实习期间帮助了 Kusion,并为其贡献了包括插件选项校验、迁移工具及 Bootstrap 主题在内的优秀功能。 - -## 4. 特别鸣谢 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/_category_.json deleted file mode 100644 index e8ddc4e4..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "发版策略", - "position": 3 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/index.md deleted file mode 100644 index 1e631423..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# 发版策略 - -发版策略定义了版本的语义、发版的流程等。为了最大化并发开发流程,Kusion 和 KCLVM 等采用独立的发版策略。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kcl_plugin.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kcl_plugin.md deleted file mode 100644 index 74a10eef..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kcl_plugin.md +++ /dev/null @@ -1,25 +0,0 @@ -# KCL 插件版本 - -KCL 插件的设计初衷只是为了扩展 KCL 语言的功能,其定位并不是完全复刻通用的编程语言全部生态。因此 KCL 插件刻意做了一些必要的限制:首先插件之间不能相互导入;其次在同一个模块中插件不能同名;最后Python实现的插件指南使用标准库和插件框架提供的功能。 - -## 1. 每个插件独立维护版本 - -基于以上的设计背景,同一个 kcl.mod 定义的模块中每个插件是相互独立的,插件之间和其依赖均不会出现依赖冲突的问题。因此,每个 KCL 插件可以独立发布独立维护。 - -## 2. kcl.mod 指定依赖的插件信息 - -kcl.mod 中 `[kcl_plugin]` 字段标注插件信息: - -```toml -# kcl.mod - -[kcl_plugin] -hello = { git = "https://github.com/KusionStack/kcl-plugin.git", path = "hello", branch = "master" } -project_context = { git = "https://github.com/KusionStack/kcl-plugin.git", path = "project_context", version = "0.1.0"} -utils = { path = "${PATH}/plugins/utils" } -``` - -## 3. 插件对 KCLVM 的版本依赖 - -插件本身可以指定依赖的 KCL 语言的版本,相关工具做检查。 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kclvm.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kclvm.md deleted file mode 100644 index ebda0c53..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kclvm.md +++ /dev/null @@ -1,23 +0,0 @@ -# KCLVM 发版策略 - -KCLVM 是 KCL 语言的实现,开源之后开发团队希望采用 [语义化版本](https://semver.org/lang/zh-CN/) 来简化管理。版本格式:主版本号.次版本号.修订号。版本号递增规则如下:主版本号对应不兼容的 API 修改,次版本号对应向下兼容的功能性新增,修订号对应向下兼容的问题修正。其中主版本号和次版本号均包含了不一样的特性统一称之为大版本,补丁修复称之为小版本。 - -总体目标是每个季度发布一个特性增强的大版本,并支持最近发布的两个大版本,根据需要不定期发布其他版本的修订。 - -## 1. 
发布流程 - -发布流程如下: - -- master 主干开发,每日产出一个 Nightly 版本,CI 系统进行测试 -- beta 测试分支,经过 6 周后从 Nightly 版本产出一个 Beta 版本 -- stable 稳定分支,经过 6 周后从 Beta 版本产出一个 Stable 版本 -- release-branch.kcl-x.y 发布分支,每个季度从 Stable 版本产出一个 rc 候选版本,并最终发布 -- release-branch.kcl-x.y 分支的 BUG 修复需要合并回 master,然后逐步同步到 beta、stable 分支 - -其中 stable、beta 只是延迟的 master 分支,release-branch.kcl-x.y 发布后将和 master 保存独立。 - -如果本次发布失败,则顺延到下个发布周期。 - -## 2. 发布维护 - -发布次要版本以解决一个或多个没有解决方法的关键问题(通常与稳定性或安全性有关)。版本中包含的唯一代码更改是针对特定关键问题的修复。重要的仅文档更改和安全测试更新也可能包括在内,但仅此而已。一旦 KCL 1.x+2 发布,解决 KCL 1.x 的非安全问题的次要版本就会停止更新。解决 KCL 1.x 安全问题的次要版本在 KCL 1.x+2 发布后停止。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kusion.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kusion.md deleted file mode 100644 index a73fddf0..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/governance/release-policy/kusion.md +++ /dev/null @@ -1,20 +0,0 @@ -# kusionctl 发版策略 - -kusionctl 是基于 KCL 的 DevOps 工具链,开源之后开发团队希望采用 [语义化版本](https://semver.org/lang/zh-CN/) 来简化管理。版本格式:主版本号.次版本号.修订号。版本号递增规则如下:主版本号对应不兼容的 API 修改,次版本号对应向下兼容的功能性新增,修订号对应向下兼容的问题修正。其中主版本号和次版本号均包含了不一样的特性统一称之为大版本,补丁修复称之为小版本。 - -总体目标是每个季度发布一个特性增强的大版本,并支持最近发布的两个大版本,根据需要不定期发布其他版本的修订。 - -## 1. 发布流程 - -发布流程如下: - -- master 主干开发,每日产出一个 Nightly 版本,CI 系统进行测试 -- beta 测试分支,经过 3 周后从 Nightly 版本产出一个 Beta 版本 -- stable 稳定分支,经过 3 周后从 Beta 版本产出一个 Stable 版本 -- rc 发布分支,每个季度从 Stable 版本产出一个 rc 候选版本,并最终发布 - -如果本次发布失败,则顺延到下个发布周期。 - -## 2. 发布维护 - -发布次要版本以解决一个或多个没有解决方法的关键问题(通常与稳定性或安全性有关)。版本中包含的唯一代码更改是针对特定关键问题的修复。重要的仅文档更改和安全测试更新也可能包括在内,但仅此而已。一旦 Kusion 1.x+2 发布,解决 Kusion 1.x 的非安全问题的次要版本就会停止更新。解决 Kusion 1.x 安全问题的次要版本在 Kusion 1.x+2 发布后停止。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/_category_.json deleted file mode 100644 index 1910abe4..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "参考手册", - "position": 5 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/_category_.json deleted file mode 100644 index 8fac9ad8..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "命令行工具", - "position": 1 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/_category_.json deleted file mode 100644 index b22440b7..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "KCL 语言工具", - "position": 2 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/docgen.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/docgen.md deleted file mode 100644 index aae0fe25..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/docgen.md +++ /dev/null @@ -1,312 +0,0 @@ ---- -sidebar_position: 5 ---- -# 文档生成工具 - -Kusion 命令行工具支持从 KCL 源码中一键提取模型文档,并支持丰富的输出格式:JSON,YAML 和 Markdown 等。本文介绍 KCL 语言的文档规范,举例说明如何使用 KCL 文档生成工具提取文档,并展示新增本地化语言文档的流程。 - -## 1. KCL 语言的文档规范 - -KCL文件的文档主要包含如下两个部分: - -* 当前 KCL Moudle 的文档:对当前 KCL 文件的说明 -* KCL 文件内包含的所有 Schema 的文档:对当前 Schema 的说明,其中包含 Schema 描述、Schema 各属性的描述、Examples 三部分,具体格式如下: - -1. 
Schema 描述 - - ```python - """这是Schema一个简短的描述信息 - """ - ``` - -2. Schema 各属性的描述:包含属性描述、属性类型、默认值、是否可选 - - ```python - """ - Attributes - ---------- - x : type, default is a, optional. - Description of parameter `x`. - y : type, default is b, required. - Description of parameter `y`. - """ - ``` - - 其中,使用 `----------` 表示 `Attributes` 为一个标题(`-` 符号长度与标题长度保持一致),属性名称与属性类型用冒号 `:` 分隔,属性的说明另起一行并增加缩进进行书写。属性的默认值说明跟在属性类型之后使用逗号 `,` 分隔,书写为 `default is {默认值}` 形式,此外需要说明属性是否为可选/必选,对于可选属性在默认值之后书写 `optional`,对于必选属性在默认值之后书写 `required`。 - - -3. Examples - - ```python - """ - Examples - -------- - val = Schema { - name = "Alice" - age = 18 - } - """ - ``` - - 此外,KCL 文档字符串语法应采用 [re-structured text (reST)](https://docutils.sourceforge.io/rst.html) 语法子集,并使用 [Sphinx](https://www.sphinx-doc.org/en/master/) 渲染呈现。 - -## 2. 从 KCL 源码生成文档 - -使用 kcl-doc generate 命令,从用户指定的文件或目录中提取文档,并输出到指定目录。 - -1. 参数说明 - ``` - usage: kcl-doc generate [-h] [--format YAML] [-o OUTPUT] [--r] - [--i18n-locale LOCALE] [--repo-url REPO_URL] - [files [files ...]] - - positional arguments: - files KCL file paths. If there's more than one files to - generate, separate them by space - - optional arguments: - -h, --help show this help message and exit - --format YAML Doc file format, support YAML, JSON and MARKDOWN. - Defaults to MARKDOWN - -o OUTPUT, --output-path OUTPUT - Specify the output directory. Defaults to ./kcl_doc - --r, -R, --recursive Search directory recursively - --i18n-locale LOCALE I18n locale, e.g.: zh, zh_cn, en, en_AS. Defaults to - en - --repo-url REPO_URL The source code repository url. It will displayed in - the generated doc to link to the source code. - ``` - -2. 从指定的一个或多个文件中提取文档,并输出到指定目录 - - ```text - kcl-doc generate your_config.k your_another_config.k -o your_docs_output_dir - ``` - -3. 从指定目录内,递归地查找 KCL 源码文件,并提取文档 - - ```text - kcl-doc generate your_config_dir -r -o your_docs_output_dir - ``` - -4. 在生成文档时,指定源码仓库地址。一经指定,生成的文档中将包含指向源码文件的链接 - - ```text - kcl-doc generate your_config.k -o your_docs_output_dir --repo-url https://url/to/source_code - ``` - -## 3. 新增本地化语言的文档 - -如前所示,默认情况下,文档生成工具提取的文档以源码 docstring 的内容为准,因而文档的语言随 docstring 编写语言而定。如果需要为源文件新增本地化语言的文档,则可以遵循按如下步骤: - -1. 初始化 i18n 配置文件。该步骤基于指定的 KCL 源码文件,生成相应的 i18n 配置文件,文件格式可选 JSON/YAML,默认为 YAML. 输出的配置文件名称将以指定的目标本地化方言结尾 - - ```text - kcl-doc init-i18n your_config.k --format JSON --i18n-locale your_target_locale - ``` - -2. 手动修改上述生成的 i18n 配置文件,使用目标语言修改配置中的 doc 字段 - -3. 基于修改后的 i18n 配置,生成本地化语言的文档。工具将查找指定目标语言的 i18n 配置文件,并转化为最终的文档 - - ```text - kcl-doc generate your_config_dir --i18n-locale your_target_locale --format Markdown - ``` - - 接下来,通过一个小例子演示新增本地化语言文档的过程。 - - 3.1 准备 KCL 源码文件,例如 server.k: - - ```python - schema Server: - """Server is the common user interface for long-running - services adopting the best practice of Kubernetes. - - Attributes - ---------- - workloadType : str, default is "Deployment", required - Use this attribute to specify which kind of long-running service you want. - Valid values: Deployment, CafeDeployment. - See also: kusion_models/core/v1/workload_metadata.k. - name : str, required - A Server-level attribute. - The name of the long-running service. - See also: kusion_models/core/v1/metadata.k. - labels : {str:str}, optional - A Server-level attribute. - The labels of the long-running service. - See also: kusion_models/core/v1/metadata.k. 
- - Examples - ---------------------- - myCustomApp = AppConfiguration { - name = "componentName" - } - """ - - workloadType: str = "Deployment" - name: str - labels?: {str: str} - ``` - - 3.2 从 server.k 得到初始化的 i18n 配置文件,例如希望为其增加中文文档,指定生成的配置文件格式为 YAML - - ```text - kcl init-i18n server.k --format YAML --i18n-locale zh_cn - ``` - - 该命令将在当前目录下创建 kcl_doc 目录,并生成 i18n 配置文件 kcl_doc/i18n_server_zh_cn.yaml,其内容如下: - - ```yaml - name: server - relative_path: ./server.k - schemas: - - name: Server - doc: | - Server is the common user interface for long-running - services adopting the best practice of Kubernetes. - attributes: - - name: workloadType - doc: | - Use this attribute to specify which kind of long-running service you want. - Valid values: Deployment, CafeDeployment. - See also: kusion_models/core/v1/workload_metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - default_value: '"Deployment"' - is_optional: false - - name: name - doc: | - A Server-level attribute. - The name of the long-running service. - See also: kusion_models/core/v1/metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: false - default_value: '' - - name: labels - doc: | - A Server-level attribute. - The labels of the long-running service. - See also: kusion_models/core/v1/metadata.k. - type: - type_str: '{str: str}' - type_category: DICT - dict_type: - key_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - value_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: true - default_value: '' - examples: | - myCustomApp = AppConfiguration { - name = "componentName" - } - doc: '' - source_code_url: '' - ``` - - 3.3 修改初始化得到的 i18n 配置,将其中的 doc 字段修改为中文的描述,修改后的配置如下: - - ```yaml - name: server - relative_path: ./server.k - schemas: - - name: Server - doc: | - Server 模型定义了采用 Kubernetes 最佳实践的持续运行的服务的通用配置接口 - attributes: - - name: workloadType - doc: | - workloadType 属性定义了服务的类型,是服务级别的属性。合法的取值有:Deployment, CafeDeployment. - 另请查看:kusion_models/core/v1/workload_metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - default_value: '"Deployment"' - is_optional: false - - name: name - doc: | - name 为服务的名称,是服务级别的属性。 - 另请查看:kusion_models/core/v1/metadata.k. - type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: false - default_value: '' - - name: labels - doc: | - labels 为服务的标签,是服务级别的属性。 - 另请查看:kusion_models/core/v1/metadata.k. - type: - type_str: '{str: str}' - type_category: DICT - dict_type: - key_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - value_type: - type_str: str - type_category: BUILTIN - builtin_type: STRING - is_optional: true - default_value: '' - examples: | - myCustomApp = AppConfiguration { - name = "componentName" - } - doc: '' - source_code_url: '' - ``` - - 3.4 基于修改后的 i18n 配置,生成本地化语言的文档,执行如下命令,将输出中文的文档 kcl_doc/doc_server_zh_cn.md,命令及生成的文档内容如下: - - ```text - kcl-doc generate server.k --i18n-locale zh_cn --format Markdown - ``` - - ~~~markdown - # server - ## Schema Server - Server 模型定义了采用 Kubernetes 最佳实践的持续运行的服务的通用配置接口 - - ### Attributes - |Name and Description|Type|Default Value|Required| - |--------------------|----|-------------|--------| - |**workloadType**
workloadType 属性定义了服务的类型,是服务级别的属性。合法的取值有:Deployment, CafeDeployment.
另请查看:kusion_models/core/v1/workload_metadata.k.|str|"Deployment"|**required**| - |**name**
name 为服务的名称,是服务级别的属性。
另请查看:kusion_models/core/v1/metadata.k.|str|Undefined|**required**| - |**labels**
labels 为服务的标签,是服务级别的属性。
另请查看:kusion_models/core/v1/metadata.k.|{str: str}|Undefined|optional| - ### Examples - ``` - myCustomApp = AppConfiguration { - name = "componentName" - } - ``` - - - - ~~~ - -## 4. 附录 - -### 1. 常见的 reST 概念 - -对于 reST 格式的文档,段落和缩进很重要,新段落用空白行标记,缩进即为表示输出中的缩进。可以使用如下方式表示字体样式: - -* \*斜体\* -* \*\*粗体\*\* -* \`\`等宽字体\`\` - -参考 [reST 文档](https://docutils.sourceforge.io/rst.html)获得更多帮助。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/fmt.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/fmt.md deleted file mode 100644 index ca9fa451..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/fmt.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -sidebar_position: 2 ---- - -# 格式化代码 - -KCL 支持通过内置的命令行工具一键格式化多个 KCL 文件文档。本文展示 KCL 编码风格和 KCL 格式化工具的使用方式。 - -## KCL 编码风格 - -KCL 格式化对文件的修改样式具体见 KCL 编码风格:[Style Guide for KCL Code](../../lang/lang/spec/codestyle.md) - -## 使用方式 - -* 单文件格式化 - -```text -kcl-fmt your_config.k -``` - -* 文件夹内多文件格式化 - -```text -kcl-fmt your_config_path -R -``` - -* 命令行参数 - * `-R|--recursive` 设置是否递归遍历子文件夹 - * `-w|--fmt-output` 设置是否输出到标准输出流,不加 `-w` 表示原地格式化 KCL 文件 - -## 格式化文件效果展示 - -* 格式化前 - -```py -import math -mixin DeploymentMixin: - service:str ="my-service" -schema DeploymentBase: - name: str - image : str -schema Deployment[replicas] ( DeploymentBase ) : - mixin[DeploymentMixin] - replicas : int = replicas - command: [str ] - labels: {str: str} -deploy = Deployment(replicas = 3){} -``` - -* 格式化后 - -```py -import math - -mixin DeploymentMixin: - service: str = "my-service" - -schema DeploymentBase: - name: str - image: str - -schema Deployment[replicas](DeploymentBase): - mixin [DeploymentMixin] - replicas: int = replicas - command: [str] - labels: {str:str} - -deploy = Deployment(replicas=3) {} - -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/index.md deleted file mode 100644 index b0ccd146..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# KCL 语言工具 - -KCL 作为 Kusion 的配置策略语言,不仅仅提供了 kcl 命令编译和执行配置程序,还提供了 fmt、lint、test、vet、docgen 等配套的辅助工具。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/lint.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/lint.md deleted file mode 100644 index accca82f..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/lint.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -sidebar_position: 3 ---- - -# Lint 检查代码风格 - -KCL 支持通过内置的命令行工具对 KCL 代码进行检查,并支持多种输出格式。本文档展示 KCL Lint 工具的使用方式。 - -## 示例 - -### 工程结构 - -```text -. 
-└── Test - └── kcl.mod - └── .kcllint - └── a.k - └── b.k - └── dir - └── c.k - └── test.k -``` - -其中,`.kcllint` 文件为配置参数文件,非必需项,`a.k`,`b.k`,`c.k`,`test.k` 为测试的 kcl 文件。 - -命令: - -```shell -kcl-lint your_config.k -``` - -或 - -```shell -kcl-lint your_config_path -``` - -lint 配置文件 - -```shell -kcl-lint --config abspath/.kcllint your_config.k -``` - -输出结果示例: - -```shell -/Users/../test.k:12:1: E0501 line too long (122 > 120 characters) -# line too long, line too long, line too long, line too long, line too long, line too long, line too long, line too long, -^ - -/Users/../test.k:14:1: E0413 Import b should be placed at the top of the module -import b -^ - - -Check total 1 files: -1 E0413: ImportStmt is not at the top of the file -1 E0501: Line too long -KCL Lint: 2 problems -``` - -## KCL Lint 工具使用方式 - -### CLI 参数 - -```shell -usage: kcl-lint [-h] [--config file] [file] - -positional arguments: - file KCL file path - -optional arguments: - -h, --help show this help message and exit - --config file KCL lint config path -``` - -+ --config : lint 配置文件 `.kcllint` 的路径 -+ file : 需要检查的单个 `.k` 文件路径或路径目录下的所有 `.k` 文件,支持绝对路径或当前目录的相对路径 - -### Lint 配置参数 - -#### 优先级 - -Lint 的配置参数的优先级如下: - -1. CLI 参数中的 `--config file` 路径的 `.kcllint` 文件 -2. 被检查 `.k` 文件所在目录或被检查目录下的 `.kcllint` 文件 -3. 默认配置 - -#### .kcllint - -`.kcllint` 文件以 yaml 格式书写。其内容包括: - -- check_list 用于选择检查的 checker,可选项为 `"import"` 和 `"misc"` -- ignore 用于选择忽略的检查项,可选项见错误代码 -- max_line_length 为检查的参数,即单行代码最大长度 -- output 用于选择输出流和输出格式,可选项为 `"stdout"`、`"file"`、`"sarif"` -- output_path 为可选项,当 output 选择了“file”或"sarif",则必须设置输出文件的路径 - -示例: - -```yaml -check_list: ["import","misc"] -ignore: ["E0501"] -max_line_length: 120 -output: ["stdout"] -output_path: -``` - -#### 默认配置 - -```yaml -check_list: ["import", "misc"] -ignore": [] -max_line_length: 200 -output: ["stdout"] -``` - -### 错误代码 - -目前提供 import_checker 和 misc_checker - -+ import_checker - + E0401: Unable to import. - + W0401: Reimport. - + E0406: Module import itself. - + W0411: Import but unused. 
- + E0413: ImportStmt is not at the top of the file -+ misc_checker - + E0501: Line too long diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/overview.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/overview.md deleted file mode 100644 index 3723f928..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/overview.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -sidebar_position: 1 ---- - -# 概览 - -KCL 工具链是 KCL 语言的工具集合,旨在提升 KCL 的批量迁移、编写、编译和测试的效率。 - -| 类别 | 工具名称 | 说明 | -| -------------- | --------------- | -------------------------------------------------------------------------------- | -| 主工具集 | **kcl** | kcl 命令行工具提供对基于 KCL 语言的配置编写、编译和测试。 | -| | kcl build | (未支持)kcl build 子命令提供对 KCL 代码的构建 | -| | kcl test | kcl 测试工具,对 KCL 的单元测试(未支持)及集成测试 | -| | kcl fmt | kcl-fmt 工具提供对 KCL 代码的格式化 | -| | kcl list | (目前为 kcl list-options 及 kcl list-attributes 子命令形态)kcl-list 工具解析 KCL 代码,并列表展示 option 参数/schema attributes 信息。 | -| 自动化工具集 | kcl-lint | kcl-lint 工具提供对 KCL 代码的 lint 检查和自动修复 | -| | kcl-doc | kcl-doc 工具提供对 KCL 代码的文档解析和生成 | -| | kcl-fmt | 等同于 kcl fmt 子命令 | -| ide 插件集 | IntelliJ IDEA KCL 插件 | 提供 IntelliJ IDEA 平台的 KCL 编写、编译辅助 | -| | VS Code KCL 插件 | 提供 VS Code 平台的 KCL 编写、编译辅助 | diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/test.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/test.md deleted file mode 100644 index 30532e76..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/test.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -sidebar_position: 5 ---- - -# 测试工具 - -## 简介 - -KCL 支持通过内置的 `kcl-test` 命令行工具和 `testing` 包提供了简易的测试框架。每个目录下的全部 KCL 文件是一个测试整体,每个 `_test.k` 中 `Test` 开头的 schema 是一个测试案例。 - -## 使用方式 - -假设有 hello.k 文件,代码如下: - -```python -schema Person: - name: str = "kcl" - age: int = 1 - -hello = Person { - name = "hello kcl" - age = 102 -} -``` - -构造 hello_test.k 测试文件,内容如下: - -```python -schema TestPerson: - a = Person{} - assert a.name == 'kcl' - -schema TestPerson_age: - a = Person{} - assert a.age == 1 - -schema TestPerson_ok: - a = Person{} - assert a.name == "kcl" - assert a.age == 1 -``` - -然后再目录下执行 `kcl-test` 命令: - -``` -$ kcl-test -ok /pkg/to/app [365.154142ms] -$ -``` - -## 失败的测试 - -将 hello_test.k 测试代码修改如下,构造失败的测试: - -```python -# Copyright 2021 The KCL Authors. All rights reserved. 
- -import testing - -schema TestPerson: - a = Person{} - assert a.name == 'kcl2' - -schema TestPerson_age: - a = Person{} - assert a.age == 123 - -schema TestPerson_ok: - a = Person{} - assert a.name == "kcl2" - assert a.age == 1 -``` - -测试输出的错误如下: - -``` -$ kcl-test -FAIL /pkg/to/app [354.153775ms] ----- failed [48.817552ms] - KCL Runtime Error: File /pkg/to/app/hello_test.k:7: - assert a.name == 'kcl2' - Assertion failure ----- failed [47.515009ms] - KCL Runtime Error: File /pkg/to/app/hello_test.k:11: - assert a.age == 123 - Assertion failure ----- failed [47.26677ms] - KCL Runtime Error: File /pkg/to/app/hello_test.k:15: - assert a.name == "kcl2" - Assertion failure -$ -``` - -## 配置 option 参数 - -可以通过 testing 包指定面值类型的命令行参数: - -```python -schema TestOptions: - testing.arguments("name", "ktest") - testing.arguments("age", "123") - testing.arguments("int0", 0) - testing.arguments("float0", 0.0) - testing.arguments("bool-true", True) - testing.arguments("bool-false", False) - - name = option("name") - assert name == "ktest" - - age = option("age") - assert age == 123 - - assert option("int0") == 0 - assert option("float0") == 0.0 - assert option("bool-true") == True - assert option("bool-false") == False -``` - -其中 `testing.arguments` 定义一组 key-value 参数,只有在当前的测试中有效。 - -option 参数也可以从 settings.yaml 文件读取。假设有 `./settings.yaml` 文件如下: - -```yaml - - key: app-name - value: app - - key: env-type - value: prod - - key: image - value: reg.docker.inc.com/test-image -``` - -然后可以通过 `testing.setting_file("./settings.yaml")` 方式配置参数。同时依然支持 `testing.arguments()` 覆盖配置文件中的参数: - -```py -schema TestOptions_setting: - testing.setting_file("./settings.yaml") - testing.arguments("file", "settings.yaml") - - assert option("app-name") == "app" - assert option("file") == "settings.yaml" -``` - -testing.setting_file("settings.yaml") 则是从 yaml 文件加载对应的 key-value 参数。 - -## 测试插件 - -如果要测试的目录含有 `plugin.py` 和测试文件,自动切换到插件模式。那么将测试前设置 `KCL_PLUGINS_ROOT` 环境变量(不能再访问其他目录的插件)用于测试当前插件,测试完成之后恢复之前的 `KCL_PLUGINS_ROOT` 环境变量。 - -## 集成测试 - -目录含有 `*.k` 时自动执行集成测试,如果有 `stdout.golden` 则验证输出的结果,如果有 `stderr.golden` 则验证错误。支持 `settings.yaml` 文件定义命令行参数。 - -如果有 k 文件含有 `# kcl-test: ignore` 标注注释将忽略测试。 - -## 批量测试 - -- `kcl-test path` 执行指定目录的测试, 当前目录可以省略该参数 -- `kcl-test -run=regexp` 可以执行匹配模式的测试 -- `kcl-test ./...` 递归执行子目录的单元测试 - -## 命令行参数 - -``` -$ kcl-test -h -NAME: - kcl-go test - test packages - -USAGE: - kcl-go test [command options] [packages] - -OPTIONS: - --run value Run only those tests matching the regular expression. 
- --quiet, -q Set quiet mode (default: false) - --verbose, -v Log all tests as they are run (default: false) - --debug, -d Run in debug mode (for developers only) (default: false) - --help, -h show help (default: false) -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/vet.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/vet.md deleted file mode 100644 index f722d99b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/kcl/vet.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Validation 校验代码 - -## 简介 - -KCL 支持通过内置的 `kcl-vet` 命令行工具提供了基本的配置数据校验能力,可以编写 KCL schema 对输入的 JSON/YAML 格式文件进行类型以及数值的校验。 - -## 使用方式 - -假设有 data.json 文件,代码如下: - -```json -{ - "name": "Alice", - "age": "18", - "message": "This is Alice", - "data": { - "id": "1", - "value": "value1" - }, - "labels": { - "key": "value" - }, - "hc": [1, 2, 3] -} -``` - -构造 schema.k 校验文件,内容如下: - -```py -schema User: - name: str - age: int - message?: str - data: Data - labels: {str:} - hc: [int] - - check: - age > 10 - -schema Data: - id: int - value: str -``` - -在目录下执行如下命令 - -``` -$ kcl-vet data.json schema.k -Validate succuss! -``` - -## 指定校验的 schema - -当校验的 KCL 文件中存在多个 schema 定义时,kcl-vet 工具会默认取第一个 schema 定义进行校验,如果需要指定校验的 schema,可以使用 `-d|--schema` 参数 - -``` -$kcl-vet data.json schema.k -d User -``` - -## 命令行参数 - -``` -$ kcl-vet -h -usage: kcl-vet [-h] [-d schema] [--format format] [-n attribute_name] - data_file kcl_file - -positional arguments: - data_file Validation data file - kcl_file KCL file - -optional arguments: - -h, --help show this help message and exit - -d schema, --schema schema - --format format Validation data file format, support YAML and JSON - -n attribute_name, --attribute-name attribute_name -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/openapi/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/openapi/_category_.json deleted file mode 100644 index 5df621d8..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/openapi/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "OpenAPI 工具", - "position": 3 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/openapi/spec.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/openapi/spec.md deleted file mode 100644 index 9f76bd43..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/cli/openapi/spec.md +++ /dev/null @@ -1,425 +0,0 @@ -# KCL OpenAPI 规范 - -[OpenAPI](https://www.openapis.org/) 允许 API 提供方规范地描述 API 操作和模型,并基于它生成自动化工具和特定语言的客户端。 - -## KCL OpenAPI 文件结构 - -依据 OpenAPI 3.0 规范,OpenAPI 文件中应至少包含 openapi、components、 info、paths 四种根节点对象,KCL OpenAPI 聚焦于其中模型定义的部分,即 OpenAPI 文件中的 `definitions`,而描述操作的 Restful API 部分(即 OpenAPI 文件中的 `paths`)则不属于 KCL OpenAPI 定义的范畴。 -​ - -注:除以上列出的节点外,OpenAPI 官方规范还支持 servers、security、tags、externalDocs 四种可选的根节点,但都不是 KCL OpenAPI 所关心的,因此用户无需填写这部分内容,即使填写了也不会产生任何影响。 -​ - -| OpenAPI 顶层对象 | 类型 | 含义 | KCL OpenAPI 工具支持情况 | -| ---------------- | ----------------- | --------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| swagger | string | openapi 版本信息 | 必填项,目前支持 openapi 2.0,即合法取值为 "2.0" | -| definitions | Definition Object | 模型定义 | 必填项 | -| info | Info Object | 当前 API 文件的元数据信息,例如标题、描述信息、版本、开源协议等 | 必填项,定义当前 OpenAPI 文件的基本信息,不会输出到 KCL 代码,但可用于 Swagger-UI 工具可视化展示 | - -为方便初学者快速理解,下面给出一个典型的 KCL 
OpenAPI 文件(截取自 swagger example [Petstore](https://petstore.swagger.io/))应包含的节点图示。KCL OpenAPI 工具重点关注其中的 definitions 节点,可以看到文件中定义了两个模型(Pet 和 Category),并且 Pet 模型中包含三个属性(name、id、category) - -## KCL schema - -KCL 中使用 schema 结构来定义配置数据的“类型”,关于 KCL schema,可参考文档:传送门 -在 definitions 节点下新增 definition 元素,即可定义 KCL schema. -示例: -下例在 KCL 代码中定义了 Pet、Category 两个 schema,同样地,其对应的 OpenAPI 也在 definitions 节点下包含这两个模型的描述。 - -```python -# KCL schema: -schema Pet: - name: str - id?: int - category?: Category - -schema Category: - name?: str - -# 对应的 OpenAPI 描述 -{ - "definitions": { - "Pet": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "id": { - "type": "integer", - "format": "int64" - }, - "category": { - "$ref": "#/definitions/Category" - } - }, - "required": [ - "name" - ] - }, - "Category": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} - -``` - -### schema 名称 - -在 KCL 中,schema 名称紧跟在 schema 关键字后声明,在 OpenAPI 中,模型的名称通过 definition 元素的 key 来定义。 - -### schema 类型 - -KCL schema 在 OpenAPI 中的类型为 "object". 例如上例中 "Pet" 的 "type" 值应为 "object". - -### schema 属性 - -KCL schema 中可以定义若干属性,属性的声明一般包含如下几部分: - -- 属性注解:可选,以 @ 开头,例如 @deprecated 注解表示属性被废弃 -- 属性访问修饰符(final):可选,声明当前属性的值不可被修改 -- 属性名称:必须 -- 属性 optional 修饰符(?):可选,带问号表示当前属性为可选属性,可以不被赋值。反之,不带问号表示必填属性 -- 属性类型:必须,可以是基本数据类型,也可以是 schema 类型, 或者是前述两种类型的并集 -- 属性默认值:非必须 - -它们与 OpenAPI 规范的对应关系如下: - -| KCL schema 属性元素 | OpenAPI 元素 | -| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 属性注解 | 暂不支持,计划扩展一个 deprecate 字段用于描述 deprecated 注解 | | -| 属性名称 | properties 节点下,每个属性的 key 即为属性名称 | -| 属性 optional 修饰符(?) 
| 模型节点下,通过 required 字段列出该模型的所有必填属性的名称,未被列出的属性即为 optional | -| 属性类型 | 属性节点下,设置 type + format 可以标识属性的基本类型,如果是 schema 类型则用 $ref 字段表示,类型 union 则由扩展字段 x-kcl-types 来标识,此外,属性节点的 enum、pattern 也可以用于表示 KCL 类型。 | -| KCL-OpenAPI 关于类型的对照关系,详见“基本数据类型”小节 | | -| 属性默认值 | 属性节点下,设置 default 字段即可为属性设置默认值 | - -示例: -下例中 Pet 模型包含了 2 个属性:name(string 类型,必填属性,无注解,无默认值)、id(int64 类型,无注解,非必填,默认值为 -1) - -```python -# KCL schema Pet,包含两个属性 name 和 id -schema Pet: - name: str - id?: int = -1 - -# 对应的 OpenAPI 文档 -{ - "definitions": { - "Pet": { - "type": "object", - "properties": { - "name": { - "type": "string", - }, - "id": { - "type": "integer", - "format": "int64", - "default": -1 - } - }, - "required": [ - "name" - ], - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} -``` - -### schema 索引签名 - -KCL schema 允许定义索引签名,用于定义属性名不固定的 dict,起到静态模板的作用。具体来说,KCL schema 索引签名包含如下几个元素: - -- 索引签名中 key 的类型:在方括号中声明,必须是基础类型 -- 索引签名中 value 的类型:在冒号后声明,可以是任意合法的 KCL 类型 -- 索引签名中的省略符:在方括号中,key 类型之前声明,使用"..."表示。如果带有该符号,表示该索引签名只用于约束未在 schema 中定义的属性;否则,表示 schema 中所有已定义和未定义属性都收到该索引签名的约束。 -- 索引签名中 key 的别名:在方括号中,紧随左方括号之后声明,使用名称 + 冒号表示,该别名可用于按名称引用索引签名 -- 索引签名的默认值:可以为索引签名设置默认值 - -在 OpenAPI 中,可以借助在模型节点的 `additionalProperties` 字段描述某些 key 为 string 的索引签名。但对于 KCL 索引签名中非 string 类型的 dict key、索引签名 key 的 check 校验,在 OpenAPI 规范没有对等的描述。它们与 OpenAPI 规范的对应关系如下: - -| KCL 索引签名元素 | OpenAPI 元素 | -| ----------------------- | ---------------------------------------------------------------------- | -| 索引签名中 key 的类型 | OpenAPI 仅支持 key 为 string 类型,无法自定义 | -| 索引签名中 value 的类型 | 模型节点的下 additionalProperties 下的 "type" 字段 | -| 索引签名中的省略符 | OpenAPI 中表示索引签名时,只能表示 KCL 中带有省略符的情况 | -| 索引签名中 key 的别名 | OpenAPI 中不支持为索引签名定义 key 别名,(预计通过扩展支持:x-alias) | -| 索引签名的默认值 | 目前不支持 | - -示例:下例中的 KCL schema Pet,包含两个预定义的属性 name 和 id,除此之外,还允许使用该 schema 的配置额外地赋值其他 key 为 string 类型,value 为 bool 类型的属性: - -```python -# KCL schema Pet,包含两个预定义的属性 name 和 id,允许额外给 key 为 string、value 为 bool 的属性赋值 -schema Pet: - name: str - id?: int - [...str]: bool - -# 对应的 OpenAPI 描述 -{ - "definitions": { - "Pet": { - "type": "object", - "properties": { - "name": { - "type": "string", - }, - "id": { - "type": "integer", - "format": "int64", - } - }, - "additionalProperties": { - "type": "bool" - }, - "required": [ - "name" - ], - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} -``` - -### schema 继承关系 - -### 内联 schema - -OpenAPI 支持嵌套地定义 schema,但 KCL 目前暂不支持 schema 的内联。OpenAPI 中内联定义的 schema 将被转换为 KCL 中带名称的 schema,其名称的命名规则为:在该内联 schema 的上层 schema 名称的基础上,增加相应的后缀。在拼接后缀时,根据定义了该内联 schema 的外层 OpenAPI 元素类型,后缀内容如下: - -| OpenAPI 文档中定义内联 schema 的元素 | KCL schema 名称拼接规则 | -| ------------------------------------ | ------------------------------ | -| 某属性节点 | 增加该属性节点的名称为后缀 | -| AdditionalProperties 节点 | 增加"AdditionalProperties"后缀 | - -注:KCL 未来也可能会支持内联 schema,届时再更新这部分转换规则 -示例 1:下例中的模型 Deployment 包含有 kind、spec 两个属性,其中 deploymentSpec 属性的 schema 通过内联的方式定义: - -```python -# OpenAPI 文档 -{ - "definitions": { - "Deployment": { - "type": "object", - "properties": { - "kind": { - "type": "string", - }, - "spec": { - "type": "object", - "properties": { - "replicas": { - "type": "integer", - "format": "int64" - } - } - } - }, - "required": [ - "kind", - "spec" - ], - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} - -# 转换为 KCL Schema 如下: -schema Deployment: - kind: str - spec: DeploymentSpec - -schema DeploymentSpec: - replicas?: int -``` - -示例 2:下例中的模型 Person 中除固定属性 name 外,还允许包含额外的属性(additionalProperties),并且这部分额外属性的属性值的 schema 通过内联的方式定义: - -```python -# 
OpenAPI 文档 -{ - "definitions": { - "Person": { - "type": "object", - "properties": { - "name": { - "type": "string", - }, - }, - "required": [ - "name", - "spec" - ], - "additionalProperties": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - } - }, - "required": [ - "name" - ] - }, - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} - -# 转换为 KCL Schema 如下: -schema Person: - name: str - [...str]: [PersonAdditionalProperties] - -schema PersonAdditionalProperties: - name: str - description?: str -``` - -## KCL 文档 - -KCL doc 规范请参考:[传送门](../kcl/docgen.md) -KCL 文档包含 module 文档、schema 文档两类,其中 schema 文档可以由 OpenAPI 转换得到。KCL schema 文档包含: - -- schema 描述信息:位于 schema 声明之后、schema 属性声明之前,是对 schema 的总体介绍 -- schema 属性信息:位于 shcema 描述信息之后,以 Attributes + 分割线分隔 -- schema 附加信息:位于 schema 属性信息之后,以 See Also + 分割线分隔 -- schema 示例信息:位于 schema 附加信息之后,以 Examples + 分割线分隔 - -它们与 OpenAPI 规范的对应关系如下: - -| KCL 文档元素 | OpenAPI 元素 | -| --------------- | ---------------------------------------------------- | -| schema 描述信息 | definitions 节点下,每个模型节点的 description 字段 | -| schema 属性信息 | properties 节点下,每个属性节点的 description 字段 | -| schema 附加信息 | definitions 节点下,每个模型节点的 example 字段 | -| schema 示例信息 | definitions 节点下,每个模型节点的 externalDocs 字段 | - -示例: -下例中为 Pet 模型定义了其 schema 描述文档 "The schema Pet definition";Pet 的两个属性 "name" 和 "id" 也分别定义了其属性文档 "The name of the pet" 及 "The id of the pet";Pet 的附加信息为 "Find more info here. [https://petstore.swagger.io/](https://petstore.swagger.io/)";此外,Pet 模型还提供了模型实例的示例写法。 - -```python -# KCL schema Pet,采用规范的 KCL 文档格式 -schema Pet: - """The schema Pet definition - - Attributes - ---------- - name : str, default is Undefined, required - The name of the pet - id : int, default is -1, optional - The age of the pet - - See Also - -------- - Find more info here. 
https://petstore.swagger.io/ - - Examples - -------- - pet = Pet { - name = "doggie" - id = 123 - } - """ - name: str - id?: int = -1 - -# 对应的 OpenAPI 文档 -{ - "definitions": { - "Pet": { - "description": "The schema Pet definition", - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name of the pet" - }, - "id": { - "type": "integer", - "format": "int64", - "default": -1, - "description": "The age of the pet" - } - }, - "required": [ - "name" - ], - "externalDocs": { - "description": "Find more info here", - "url": "https://petstore.swagger.io/" - }, - "example": { - "name": "doggie", - "id": 123 - } - } - }, - "swagger": "2.0", - "info": { - "title": "demo", - "version": "v1" - } -} -``` - -​ - -## 基本数据类型 - -| JSON Schema type | swagger type | KCL type | comment | -| ---------------- | --------------------------- | --------------- | ----------------------------------------------------------------------------------------------------- | -| boolean | boolean | bool | | -| number | number | float | | -| | number format double | **unsupported** | | -| | number format float | float | | -| integer | integer | int (32) | | -| | integer format int64 | **unsupported** | | -| | integer format int32 | int (32) | | -| string | string | str | | -| | string format byte | str | | -| | string format int-or-string | int | str | -| | string format binay | str | | -| | string format date | unsupported | As defined by full-date - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) | -| | string format date-time | unsupported | As defined by date-time - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) | -| | string format password | unsupported | for swagger: A hint to UIs to obscure input. 
| -| | datetime | datetime | | - -# Reference - -- openapi spec 2.0:[https://swagger.io/specification/v2/](https://swagger.io/specification/v2/) -- openapi spec 3.0:[https://spec.openapis.org/oas/v3.1.0](https://spec.openapis.org/oas/v3.1.0) -- openapi spec 3.0(swagger 版本):[https://swagger.io/specification/](https://swagger.io/specification/) -- openapi spec 2.0 #SchemaObject:[https://swagger.io/specification/v2/#schemaObject](https://swagger.io/specification/v2/#schemaObject) -- go swagger:[https://goswagger.io/use/models/schemas.html](https://goswagger.io/use/models/schemas.html) -- swagger data models:[https://swagger.io/docs/specification/data-models/](https://swagger.io/docs/specification/data-models/) diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/_category_.json deleted file mode 100644 index 8c7bfcf8..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "KCL", - "position": 2 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/index.md deleted file mode 100644 index ab9d105e..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/index.md +++ /dev/null @@ -1 +0,0 @@ -# KCL Language diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/_category_.json deleted file mode 100644 index 6066c82b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "KCL", - "position": 1 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/_kcl_mod.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/_kcl_mod.md deleted file mode 100644 index 9fd33601..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/_kcl_mod.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -sidebar_position: 6 ---- - -# kcl.mod - -当配置参数变得复杂时,我们可以通过拆分文件和目录的方式重新组装 KCL 代码,不同文件中的 KCL 代码对应包或模块,它们可以通过 import 语句被导入使用。模块和包可以通过相对模块路径导入,也可以通过绝对模块路径导入。而模块的绝对路径是通过 kcl.mod 文件指定的。 - -## 1. 定位模块根目录 - -比如有以下结构: - -``` -. -|── kcl.mod -├── mod1.k -├── mod2.k -├── pkg1 -│   ├── def1.k -│   ├── def2.k -│   └── def3init.k -└── pkg2 - ├── file2.k - └── subpkg3 - └── file3.k -``` - -- kcl.mod 文件所在的目录对应模块的根目录 -- `mod1.k` 对应导入方式 `import mod1` -- `mod2.k` 对应导入方式 `import mod2` -- `pkg1/*.k` 对应导入方式 `import pkg1` -- `pkg2/*.k` 对应导入方式 `import pkg2` -- `pkg1/subpkg3/*.k` 对应导入方式 `import pkg1.subpkg3` - -> **Note:** 对于同目录下的 KCL 文件,不要混用目录和文件的导入方式(比如 `import pkg1` 和 `import pkg1.def1` 就是混用的例子)。 - -## 2. kcl.mod 文件的结构 - -最简单的 kcl.mod 是一个空文件,只是用于定位模块的绝对路径。不过 kcl.mod 其实是一种 [TOML](https://github.com/toml-lang/toml) 格式的文件,其中可以包含一些配置信息。 - -比如以下的 kcl.mod 文件: - -```toml -[build] -enable_pkg_cache=true -cached_pkg_prefix="base.pkg." 
- -[expected] -kclvm_version="v0.3.9" -kcl_plugin_version="v0.2.14" -``` - -`build` 段打开了缓存,并定义了要换成的包路径前缀。`expected` 段定义了期望的 KCLVM 版本和插件版本。 - -完整的 kcl.mod 对应以下的 Protobuf 结构: - -```protobuf -syntax = "proto3"; - -package kclvm.modfile; - -// kcl.mod 文件对应的内存格式 -// kcl.mod 文件为TOML格式, 字段名字和类型保持一致 -message KclModFile { - string root = 1; // 根目录路径, 由程序填充 - string root_pkg = 2; // 根包import路径, 对应所有子包的前缀, 可以忽略 - - KclModFile_build_section build = 3; // build 配置 - KclModFile_expected_section expected = 4; // expected 配置 -} - -message KclModFile_build_section { - bool enable_pkg_cache = 1; // 启动pkg缓存 - string cached_pkg_prefix = 2; // 缓存的前缀路径 - string target = 3; // 编译的目标,可选 native, wasm -} - -message KclModFile_expected_section { - string min_build_time = 1; // 期望构建时间下界 2021-08-14 20:30:08 - string max_build_time = 2; // 期望构建时间上界 2021-08-16 20:30:08 - string kclvm_version = 3; // KCLVM 版本依赖 - string kcl_plugin_version = 4; // KCLVM Plugin 版本依赖 - string global_version = 5; // 全局版本 -} -``` - -kcl.mod 文件对应 KclModFile 结构,其中包含模块路径和本地路径的映射关系(目前还没有使用)。上面例子中的 build 和 expected 分别对应 KclModFile_build_section 和 KclModFile_expected_section 结构。 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/_category_.json deleted file mode 100644 index 2e047bcb..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Code Lab", - "position": 2 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/collaborative.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/collaborative.md deleted file mode 100644 index febb4181..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/collaborative.md +++ /dev/null @@ -1,344 +0,0 @@ ---- -title: "Co-configuration with config operations" -linkTitle: "Co-configuration with config operations" -type: "docs" -weight: 2 -description: Co-configuration with config operations -sidebar_position: 3 ---- -## 1. Introduction - -Kusion Configuration Language (KCL) is a simple and easy-to-use configuration language, where users can simply write the reusable configuration code. - -In this codelab, we will learn how to write the config in a collaborative way using the KCL config operation features. - -### What We Will Learn - -1. Define schemas and organize project directories. -2. Create multiple environment configurations via the KCL config operation features. -3. Configure compiling parameters and tests. - -## 2. Define Schemas and Organize Project Directories - -### Schema Definitions - -Suppose we want to define a server configuration with certain attributes, we can create a simple config by creating a `server.k`, we can fill in the following code as below which defines a reusable schema of the configuration of a server. 
- -```python -import units - -type Unit = units.NumberMultiplier - -schema Server: - replicas: int = 1 - image: str - resource: Resource = {} - mainContainer: Main = {} - labels?: {str:str} - annotations?: {str:str} - -schema Main: - name: str = "main" - command?: [str] - args?: [str] - ports?: [Port] - -schema Resource: - cpu?: int = 1 - memory?: Unit = 1024Mi - disk?: Unit = 10Gi - -schema Port: - name?: str - protocol: "HTTP" | "TCP" - port: 80 | 443 - targetPort: int - - check: - targetPort > 1024, "targetPort must be larger than 1024" -``` - -In the code above, we define a schema named `Server`, which represents the configuration type that the user will write, which contains some basic type attributes (e.g., `replicas`, `image`, etc) and some composite type attributes (e.g., `resource`, `main`, etc). In addition to some basic types mentioned in the [schema codelab](./schema.md), we can see two types in the above code `Unit` and `units.NumberMultiplier`. Among them, `units.NumberMultiplier` denotes the KCL number unit type, which means that a natural unit or binary unit can be added after the KCL number, such as `1K` for `1000`, `1Ki` for `1024`. `Unit` is the type alias of `units.NumberMultiplier`, which is used to simplify the writing of type annotations. - -### Project Directories - -In order to complete the collaborative configuration development, we first need a configuration project, which contains the configuration of the test application and the differential configuration of different environments, so we are creating the following project directory: - -``` -. -├── appops -│ └── test_app -│ ├── base -│ │ └── base.k -│ ├── dev -│ │ ├── ci-test -│ │ │ └── stdout.golden.yaml -│ │ ├── kcl.yaml -│ │ └── main.k -│ └── prod -│ ├── ci-test -│ │ └── stdout.golden.yaml -│ ├── kcl.yaml -│ └── main.k -├── kcl.mod -└── pkg - └── sever.k -``` - -The directory of the project mainly contains three parts: - -- `kcl.mod`: The file used to identify the root directory of the KCL project. -- `pkg`: `Server` Schema structure reused by different application configurations. -- `appops`: Server configurations of different applications, currently only one application `test_app` is placed. - - `base`: Application common configurations for all environments. - - `dev`: Application configuration for the development environment. - - `prod`: Application configuration for the production environment. - -The meaning of `base.k`, `main.k`, `kcl.yaml` and `ci-test/stdout.golden.yaml` will be mentioned in subsequent sections. - -## 3. Create multiple environment configurations via the KCL config operation features - -### Create a baseline configuration - -After we have organized the project directory and the basic server configuration model, we can write the configuration of the user application. We can create our own test application folder `test_app` and place it in the application configuration folder `appops`. - -For the configuration of an application, we often divide it into a basic configuration and the differential configuration of multiple environments and merge them. Through the configuration merging feature of KCL, we can easily do this. Assuming that we have two configurations of development environment and production environment, we can create three folders: `base`, `dev` and `prod` to store baseline, development environment and production environment configurations respectively. 
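The idea behind the merge is simply to declare the same top-level variable in more than one file and let KCL union the config blocks when those files are compiled together. A minimal sketch of what we are about to do (the file, schema and attribute names here are hypothetical, not part of the project above):

```python
# app.k (hypothetical): the schema shared by every environment
schema App:
    image: str
    replicas: int = 1

# base.k (hypothetical): attributes common to all environments
app: App {
    image = "nginx:1.14.2"
}

# dev.k (hypothetical): only the differences for this environment
app: App {
    replicas = 2
}
```

Compiling the hypothetical `base.k` and `dev.k` together yields a single `app` that carries both the baseline image and the overridden replica count.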
First, we write the configuration of `base/base.k`: - -```python -import pkg - -server: pkg.Server { - # Set the image with the value "nginx:1.14.2" - image = "nginx:1.14.2" - # Add a label app into labels - labels.app = "test_app" - # Add a mainContainer config, and its ports are [{protocol = "HTTP", port = 80, targetPort = 1100}] - mainContainer.ports = [{ - protocol = "HTTP" - port = 80 - targetPort = 1100 - }] -} -``` - -As in the above code, we use the `import` keyword in `base.k` to import the `Server` schema placed under `pkg` and use it to instantiate a configuration named `server`, in which we set `image` attribute to `"nginx:1.14.2"`, and a label `app` with the value `test_app` is added. In addition, we also added the configuration of the main container `mainContainer` with the value `[{protocol = "HTTP", port = 80, targetPort = 1100}]` for the ports attribute. - -KCL command: - -``` -kcl appops/test_app/base/base.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2 - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app -``` - -At this point, we have a baseline configuration. - -### Create multiple environment configurations - -Next we configure a differentiated multi-environment configuration. First assume that we want to use a temporary image of our own `nginx:1.14.2-dev` in the development environment, and then use it to override the server configuration in the baseline, we can write the following configuration in `dev/main.k`: - -```python -import pkg - -server: pkg.Server { - # Override the image declared in the base - image = "nginx:1.14.2-dev" -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/dev/main.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2-dev - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app -``` - -It can be seen that the `image` field of the output YAML is overwritten to `nginx:1.14.2-dev`. Suppose we also want to add a label to the `dev` environment with a key of `env` and a value of `dev`, we add the following code to `dev/main.k`: - -```python -import pkg - -server: pkg.Server { - # Override the image declared in the base - image = "nginx:1.14.2-dev" - # Union a new label env into base labels - labels.env = "dev" -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/dev/main.k -``` - -```yaml -server: - replicas: 1 - image: nginx:1.14.2-dev - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app - env: dev -``` - -It can be seen that there are two labels in the `labels` field of the output YAML. - -In addition, we can also use the `+=` operator to add new values to list type attributes, such as the `mainContainer.ports` configuration in the baseline environment, continue to modify the code in `dev/main.k`: - -```python -import pkg - -server: pkg.Server { - # Override the base image. - image = "nginx:1.14.2-dev" - # Union a new label env into base labels. - labels.env = "dev" - # Append a port into base ports. 
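    # (the "+=" operator concatenates with the ports list inherited from base, so the HTTP port from base.k is kept alongside the TCP port added below)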
- mainContainer.ports += [{ - protocol = "TCP" - port = 443 - targetPort = 1100 - }] -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/dev/main.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2-dev - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - - protocol: TCP - port: 443 - targetPort: 1100 - labels: - app: test_app - env: dev -``` - -Using the same method, we can build the production configuration, write the code in the `dev/main.k` file, and add a label to it. - -```python -import pkg - -server: pkg.Server { - # Union a new label env into base labels - labels.env = "prod" -} -``` - -KCL command: - -``` -kcl appops/test_app/base/base.k appops/test_app/prod/main.k -``` - -Output: - -```yaml -server: - replicas: 1 - image: nginx:1.14.2 - resource: - cpu: 1 - memory: 1073741824 - disk: 10737418240 - mainContainer: - name: main - ports: - - protocol: HTTP - port: 80 - targetPort: 1100 - labels: - app: test_app - env: prod -``` - -## 4. Configure compiling parameters and tests - -In the previous section, we built a multi-environment configuration through code. It can be seen that the KCL command line compilation parameters of different environments are similar, so we can configure these compilation parameters into a file and input them to the KCL command line for invocation. Configure the following code in `dev/kcl.yaml`: - -```yaml -kcl_cli_configs: - files: - - ../base/base.k - - main.k - output: ./ci-test/stdout.golden.yaml -``` - -Then we can compile the configuration in the development environment with the following command: - -``` -cd appops/test_app/dev && kcl -Y ./kcl.yaml -``` - -In addition, we have configured the `output` field in `dev/kcl.yaml` to output YAML to a file for subsequent configuration distribution or testing. You can verify that the application's configuration is as expected by walking through the `kcl.yaml` builds in each environment and comparing with `./ci-test/stdout.golden.yaml`. - -## 5. The Final Step - -Congratulations! - -We have completed the third lesson about KCL. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/index.md deleted file mode 100644 index dbe554a9..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/index.md +++ /dev/null @@ -1 +0,0 @@ -# Code Lab diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/schema.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/schema.md deleted file mode 100644 index 423b0cb5..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/schema.md +++ /dev/null @@ -1,816 +0,0 @@ ---- -title: "Write complex config using KCL Schema" -linkTitle: "Write complex config using KCL Schema" -type: "docs" -weight: 2 -description: Write complex config using KCL Schema -sidebar_position: 2 ---- -## 1. Introduction - -Kusion Configuration Language (KCL) is a simple and easy-to-use configuration language, where users can simply write the reusable configuration code. - -In this codelab, we will learn how to write customized config using KCL, such that we can define a schema and write the config in a collaborative way. - -### What We Will Learn - -1. Define a simple schema -2. 
Set default immutable values to schema fields -3. Create config based on a simple schema -4. Write complex logic in schema -5. Create a new schema via schema combinations -6. Create a config of a deeply nested schema using dict/map -7. Create new schema via schema inheritance -8. Create new schema via multiple mixin schemas -9. Declare validation rules for the schema -10. Config schema output layout -11. Share and reuse schema - -## 2. Write Simple Schema - -Suppose we want to define a workload with certain attributes, we can create a simple config by creating a `my_config.k`, we can fill in the following code as below which defines a reusable schema of the configuration of deploy. - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str - replica: int - command: [str] - labels: {str:str} -``` - -In the code above, `cpu` and `memory` are defined as int value; `name`, `image` and `service` are string; `command` is a list of string type; `labels` is a dict type, whose key type and value type are both string. - -Besides, each attribute **must** be assigned with a not-None value as a schema instance unless it is modified by a question mark **?** as an optional attribute. - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str - replica: int - command: [str] - labels?: {str:str} # labels is an optional attribute -``` - -When there is an inheritance relationship: - -- If the attribute is optional in the base schema, it could be optional or required in the sub-schema. -- If the attribute is required in the base schema, it must be required in the sub-schema. - -## 3. Enhance Schema as Needed - -Suppose we need to set default values to service and replica, we can make them as below: - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str = "my-service" # defaulting - replica: int = 1 # defaulting - command: [str] - labels?: {str:str} # labels is an optional attribute -``` - -And then we can set the service type annotation as the string literal type to make it immutable: - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: "my-service" = "my-service" - replica: int = 1 - command: [str] - labels?: {str:str} -``` - -In the schema, type hint is a `must`, for example we can define cpu as `cpu: int`. - -Specially, we can define a string-interface dict as `{str:}`, and in case we want to define an object or interface, just define as `{:}`. - -## 4. Create Config Based on Simple Schema - -Now we have a simple schema definition, we can use it to define config as: - -```python -nginx = Deployment { - name = "my-nginx" - cpu = 256 - memory = 512 - image = "nginx:1.14.2" - command = ["nginx"] - labels = { - run = "my-nginx" - env = "pre-prod" - } -} -``` - -Run with the following KCL command, we should be able to see the generated yaml files as the output as below: - -KCL command: - -``` - kcl my_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 256 - memory: 512 - image: nginx:1.14.2 - service: my-service - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -> Check the manual and specification out for more details about collection data types and block. - -In addition, the **config selector expressions** can be used to init a schema instance, and we can ignore the comma at the end of the line in the config expression. 
- -```python -nginx = Deployment { - name = "my-nginx" - cpu = 256 - memory = 512 - image = "nginx:1.14.2" - command = ["nginx"] # Ignore the comma at the end of the line - labels.run = "my-nginx" # A dict variable in schema can use selector expressions - labels.env = "pre-prod" # A dict variable in schema can use selector expressions -} -``` - -## 5. Write More Complex Logic in Schema - -Suppose we have some schema logic, we can wrapper it into schema: - -```python -schema Deployment[priority]: - name: str - cpu: int = _cpu - memory: int = _cpu * 2 - image: str - service: "my-service" = "my-service" - replica: int = 1 - command: [str] - labels?: {str:str} - - _cpu = 2048 - if priority == 1: - _cpu = 256 - elif priority == 2: - _cpu = 512 - elif priority == 3: - _cpu = 1024 - else: - _cpu = 2048 -``` - -Now, we can define a config by creating a schema instance and pass in priority as an argument to schema: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1.14.2" - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" -} -``` - -Run with kcl, we should see the generated yaml files as output as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 512 - memory: 1024 - image: nginx:1.14.2 - service: my-service - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -## 6. Create New Schema via Schema Combinations - -Now we want to define a detailed schema with service and volumes, we can do it as follows: - -```python -schema Deployment[priority]: - name: str - cpu: int = _cpu - memory: int = _cpu * 2 - volumes?: [Volume] - image: str - service?: Service - replica: int = 1 - command: [str] - labels?: {str:str} - - if priority == 1: - _cpu = 256 - elif priority == 2: - _cpu = 512 - elif priority == 3: - _cpu = 1024 - else: - _cpu = 2048 - -schema Port: - name: str - protocol: str - port: int - targetPort: int - -schema Service: - name: "my-service" = "my-service" - ports: [Port] - -schema Volume: - name: str - mountPath: str - hostPath: str -``` - -In this case, Deployment is composed of Service and a list of Volumes, and Service is composed of a list of Ports. - -## 7. Create Config of Deeply Nested Schema using Dict/Map - -Now we have a new Deployment schema, however, we may notice that it contains multiple layers of nested structures, in fact, this is very common in complex structure definitions, and we often have to write imperative assembly code to generate the final structure. - -With KCL, we can create the config with simple dict declaration, with the capability of full schema initialization and validation. 
For example, we can simply config nginx by the new Deployment schema as follows: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Run with KCL, we will see the generated yaml files as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -Note that, the dict that we use to define Deployment config must be aligned with the schema definition, otherwise we will get an error. For example, suppose we define a wrong type of service port as below: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = [80] # wrong data type, trying to assign List to int - targetPort = 9376 - }] -} -``` - -Run with KCL, we will see the error message as output as below: - -KCL command: - -```python -kcl my_config.k -``` - -Stderr: - -``` -The type got is inconsistent with the type expected: expect int, got [int(80)] -``` - -## 8. Declare Schema Validation Rules - -Now we have seen a complex schema, in which every field has a type hint to make it less error-prone. But this is not good enough, we want to support more enhanced verifications to our schemas, so that code errors in schemas and configs can be discovered as soon as possible. - -Lots of validation rules, like None type check, range check, value check, length check, regular expression matching, enum check have already been added or in progress. 
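In KCL these rules are written in a `check` block at the end of the schema: each line is a boolean condition, optionally followed by a message that is reported when the condition fails. A minimal sketch (the schema and attribute here are hypothetical):

```python
schema HostPort:
    port: int

    check:
        1 <= port <= 65535, "port must be in the range [1, 65535]"
```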
Here is a code sample: - -```python -import regex - -schema Deployment[priority]: - name: str - cpu: int = _cpu - memory: int = _cpu * 2 - volumes?: [Volume] - image: str - service?: Service - replica: int = 1 - command: [str] - labels?: {str:str} - - if priority == 1: - _cpu = 256 - elif priority == 2: - _cpu = 512 - elif priority == 3: - _cpu = 1024 - else: - _cpu = 2048 - - check: - multiplyof(cpu, 256), "cpu must be a multiplier of 256" - regex.match(image, "^[a-zA-Z]+:\d+\.\d+\.\d+$"), "image name should be like 'nginx:1.14.2'" - 1 <= replica < 100, "replica should be in range (1, 100)" - len(labels) >= 2 if labels, "the length of labels should be large or equal to 2" - "env" in labels, "'env' must be in labels" - len(command) > 0, "the command list should be non-empty" - -schema Port: - name: str - protocol: str - port: int - targetPort: int - - check: - port in [80, 443], "we can only expose 80 and 443 port" - protocol in ["HTTP", "TCP"], "protocol must be either HTTP or TCP" - 1024 < targetPort, "targetPort must be larger than 1024" - -schema Service: - name: "my-service" = "my-service" - ports: [Port] - - check: - len(ports) > 0, "ports list must be non-empty" - -schema Volume: - name: str - mountPath: str - hostPath: str -``` - -Since the attributes defined by the schema are **required** by default, the verification that judges that the variable cannot be None/Undefined can be omitted. - -```python -schema Volume: - name: str - mountPath: str - hostPath: str -``` - -Now we can write the config based on the new schema and expose config errors in time. For example, with the invalid config as below: - -```python -nginx = Deployment(priority=2) { - name = "my-nginx" - image = "nginx:1142" # image value is not matching the regex - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Every field is type-valid, but the image name is invalid. - -Run with KCL, we will see the error message as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stderr: - -``` -Schema check is failed to check condition: regex.match(image, "^[a-zA-Z]+:\d+\.\d+\.\d+$"), "image name should be like 'nginx:1.14.2'" -``` - -> The verification capability of KCL covers the verification defined by Openapi so that we can write any API verifications through KCL. - -## 9. Create New Schema via Schema Inheritance - -Now we have a solid Deployment schema definition and we can use it to declare config. - -Usually, schema Deployment will be used in multiple scenarios. We can directly use the schema to declare the configurations in different use cases (see the above section), or we can produce a more specific schema definition through inheritance. - -For example, we can use the Deployment schema as a basis, to define the nginx's base schema, and extend the definition -in each scenario. - -In this case, we define some commonly used attributes. Please note that we mark the name to be immutable with the 'final' keyword to prevent it from being overwritten. 
- -```python -schema Nginx(Deployment): - """ A base nginx schema """ - name: "my-nginx" = "my-nginx" - image: str = "nginx:1.14.2" - replica: int = 3 - command: [str] = ["nginx"] - -schema NginxProd(Nginx): - """ A prod nginx schema with stable configurations """ - volumes: [Volume] = [{ - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - """ A volume mapped to host path """ - service: Service = { - ports = [{ - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] - } - """ An 80 port to target backend server """ -``` - -Now we have some static configurations for nginx. It is recommended to declare configurations that we think are static there, and put more dynamic configurations as below: - -```python -nginx = Nginx { - labels.run = "my-nginx" - labels.env = "pre-prod" -} -``` - -```python -nginx = NginxProd { - labels.run = "my-nginx" - labels.env = "pre-prod" -} -``` - -Now, we can simply define nginx prod config just with runtime label value "prod" which is not that static. - -In fact, under some complex situation, we can split all configurations into the basic, business, and environment configuration definitions in this way, and achieve collaboration among team members based on this. - -Run with KCL, we will see the generated yaml files as output as below: - -KCL command: - -``` -kcl prod_config.k -``` - -Stdout: - -```yaml -nginx: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 3 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -## 10. Create New Schema by Multiple Protocol and Mixin Schemas Inheritance - -Now, we can complete the declaration of the server configuration through the Deployment schema. - -However, usually, the actual situation is more complicated, and the deployment may have a variety of optional variable accessories. - -For example, we want to support a persistent volume claim based on an existing schema, as a reusable Kubernetes schema. In this case, we can just wrapper it with a `mixin` and a `protocol` as follows: - -```python -import kusion_kubernetes.api.core.v1 - -protocol PVCProtocol: - pvc?: {str:} - -mixin PersistentVolumeClaimMixin for PVCProtocol: - """ - PersistentVolumeClaim (PVC) sample: - Link: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims - """ - - # Mix in a new attribute `kubernetesPVC` - kubernetesPVC?: v1.PersistentVolumeClaim - - if pvc: - kubernetesPVC = v1.PersistentVolumeClaim { - metadata.name = pvc.name - metadata.labels = pvc.labels - spec = { - accessModes = pvc.accessModes - resources = pvc.resources - storageClassName = pvc.storageClassName - } - } -``` - -With this PersistentVolumeClaimMixin, we define a PVC schema with a clear `user interface`, and use Kubernetes PVC as an implementation. Then, we can define a server schema with Deployment schema, and PVC mixin schema. - -``` -schema Server(Deployment): - mixin [PersistentVolumeClaimMixin] - pvc?: {str:} - """ pvc user interface data defined by PersistentVolumeClaimMixin """ -``` - -In the Server schema, Deployment is the base schema, and PersistentVolumeClaimMixin is an optional add-on whose user interface data is `pvc?: {str:}`. - -Note, the `mixin` is often used to add new attributes to the host schema, or to modify the existing attributes of the host schema. 
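A minimal sketch of that pattern (the protocol, mixin and attribute names here are hypothetical) mixes one new attribute into the host schema and fills it from an attribute the host already declares:

```python
protocol AppProtocol:
    name: str

mixin AppLabelMixin for AppProtocol:
    # mix a new attribute `appLabels` into the host schema,
    # derived from the host's existing `name` attribute
    appLabels?: {str:str}

    if name:
        appLabels = {app = name}
```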
Thus, `mixin` can use the attributes in the host schema. Since the `mixin` is designed to be reusable, we need an additional `protocol` to constrain the attribute names and types in the host schema for the `mixin`. - -Now, if we want a deploy with a PVC, just declare as user interface: - -```python -server = Server { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels = { - run = "my-nginx" - env = "pre-prod" - } - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] - pvc = { - name = "my_pvc" - accessModes = ["ReadWriteOnce"] - resources.requests.storage = "8Gi" - storageClassName = "slow" - } -} -``` - -Run with kcl, we will see the generated yaml files as output as below: - -KCL command: - -``` -kcl server.k -``` - -Stdout: - -```yaml -server: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod - pvc: - name: my_pvc - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 8Gi - storageClassName: slow ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: my_pvc -spec: - accessModes: - - ReadWriteOnce - storageClassName: slow - resources: - requests: - storage: 8Gi -``` - -If we don't want a persistent volume, just remove the pvc config block. - -## 11. Share and Reuse Schema - -The Server schema could be shared via `import`, we can simply package our code with KCL. - -```python -import pkg - -server = pkg.Server { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels.run = "my-nginx" - labels.env = "pre-prod" - service.ports = [Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Another skill we should know about sharing code is, modules under the same package do not need to import each other. - -Suppose we have models in a pkg: - -``` -pkg/ - - deploy.k - - server.k - - pvc.k -``` - -And in `server.k`, we can just use Deployment schema in `deploy.k` and pvc schema in `pvc.k` without import: - -```python -# no import needed -schema Server(Deployment): - mixin [PersistentVolumeClaimMixin] - pvc?: {str:} - """ pvc user interface data defined by PersistentVolumeClaimMixin """ -``` - -And then users must import the pkg to use it as a whole: - -```python -import pkg - -server = pkg.Server { - name = "my-nginx" - image = "nginx:1.14.2" - volumes = [pkg.Volume { - name = "mydir" - mountPath = "/test-pd" - hostPath = "/data" - }] - command = ["nginx"] - labels = { - run = "my-nginx" - env = "pre-prod" - } - service.ports = [pkg.Port { - name = "http" - protocol = "TCP" - port = 80 - targetPort = 9376 - }] -} -``` - -Run kcl command: - -``` -kcl pkg_server.k -``` - -Output: - -```yaml -server: - name: my-nginx - cpu: 512 - memory: 1024 - volumes: - - name: mydir - mountPath: /test-pd - hostPath: /data - image: nginx:1.14.2 - service: - name: my-service - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - replica: 1 - command: - - nginx - labels: - run: my-nginx - env: pre-prod -``` - -## 12. The Final Step - -Congratulations! 
- -We have completed the second lesson about KCL, we have used KCL to replace our key-value text file to get better programming support. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/simple.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/simple.md deleted file mode 100644 index 041cb370..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/codelab/simple.md +++ /dev/null @@ -1,495 +0,0 @@ ---- -title: "Write simple config with KCL" -linkTitle: "Write simple config with KCL" -type: "docs" -weight: 2 -description: Write simple config with KCL -sidebar_position: 1 ---- -## 1. Introduction - -Kusion Configuration Language (KCL) is a simple and easy-to-use configuration language, where users can simply -write the reusable configuration code. - -In this first codelab, we will learn how to write a simple config with KCL. - -Learning this codelab only requires basic programming knowledge, and experience with python will make it even easier. - -### What We Will Learn - -1. Write simple key-value configuration in a programmable way -2. Write simple logic in KCL code -3. Write collections in KCL code -4. Test and debug with KCL code -5. Use built-in support in KCL code -6. Share and reuse KCL code -7. Write config with dynamic input arguments - -## 2. Write Key-Value Pairs - -Generate a simple config by creating a `my_config.k`, we can fill in the following code without strict format which describes the configuration of deploy. - -```python -cpu = 256 -memory = 512 -image = "nginx:1.14.2" -service = "my-service" -``` - -In the code above, cpu and memory are declared as int value, while image and service are string literal. - -Run with KCL, we will see the generated data in yaml format as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 256 -memory: 512 -image: nginx:1.14.2 -service: my-service -``` - -The exported variable is immutable by default so that once it is declared, we can't modify it some where else. - -## 3. Write Simple Logic - -Sometimes we want to write a logic in configuration, then we can use: - -- Mutable and non-exported variable starting with '_' -- If-else statement - -A non-exported variable means it will not appear in the output YAML, and it can be assigned multiple times. - -Here is a sample to show how to adjust the resource with conditions. - -KCL command: - -```python -kcl my_config.k -``` - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -image = "nginx:1.14.2" -service = "my-service" -``` - -Run with KCL, we will see the generated data in yaml format as below: - -```python -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 256 -memory: 512 -image: nginx:1.14.2 -service: my-service -``` - -.. note:: -KCL has rich support of operators and string member functions, please read manual and specification for more details. - -## 4. Write Collections - -We can use collections to represent complex data types. 
The collections which are already supported are: - -- list -- dict - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -command = ["nginx"] # a list -labels = {run = "my-nginx"} # a dict -image = "nginx:1.14.2" -service = "my-service" -``` - -Run with kcl, we will see the generated data as yaml format as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx -image: nginx:1.14.2 -service: my-service -``` - -> Check manual and specification out for more about collection date type and member functions. - -## 5. Append Items Into Collections - -We can combine logical expressions, comprehensions, slices, unions and other characteristics to dynamically add elements to the collection - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable -_env = "pre-prod" - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -_command = ["nginx"] # a list -_command = _command + ["-f", "file"] # Append itemsinto command using + operator to contact two lists -command = [c.lower() for c in _command] # Take eachelement in the list to lowercase -_labels = { - run = "my-nginx" - if _env: - env = _env # Append a dict key-value pair when the _env is not None/Undefined or empty using if expressions -} # a dict -labels = _labels -image = "nginx:1.14.2" -service = "my-service" -``` - -Run with kcl, we will see the generated data as yaml format as below: - -```python -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 256 -memory: 512 -command: -- nginx -- -f -- file -labels: - run: my-nginx -image: nginx:1.14.2 -service: my-service -``` - -## 6. Write Assert - -To make code testable and robust, we can verify config data with assertions. - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -cpu = _cpu -memory = _cpu * 2 -command = ["nginx"] # a list -labels = {run = "my-nginx"} # a dict -image = "nginx:1.14.2" -service = "my-service" -assert "env" in labels, "env label is a must" -assert cpu >= 256, "cpu cannot be less than 256" -``` - -Run with KCL, we will see eval failure with an error message as output as below: - -``` -kcl my_config.k -``` - -Stderr: - -``` -Assertion failure: env label is a must. -``` - -After adding env:pre-prod pair into labels, we will get the output as: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -``` - -## 7. Use Handy Built-in Support - -What's more, we can use built-in functions to help we debug or simplify coding. 
- -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -_name = "nginx" -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = "pre-prod" -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" - -# debugging -print(labels) # debugging by print - -# test -assert len(labels) > 0, "labels can't be empty" # uselen() to get list length -assert "env" in labels, "env label is a must" -assert cpu >= 256, "cpu cannot be less than 256" -``` - -This sample shows how we use `format()`, `len()`, `print()` function to help customize the config. - -Run with KCL, we will see the generated data in yaml format as below: - -KCL command: - -``` -kcl my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -run: my-nginx -env: pre-prod -``` - -Note: more built-in functions and modules can be seen in spec/module - -## 8. Reuse Variables in Another Module - -To make our code well-organized, we can simply separate our code to `my_config.k` and `my_config_test.k`. - -Config data defined in `my_config.k`, - -```python -_priority = 1 # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 -_name = "nginx" - -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = "pre-prod" -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" -``` - -And test code defined in `my_config_test.k`, in which we can import my_config.k: - -```python -import my_config - -# debugging -print(my_config.labels) # debugging by print - -# test -assert len(my_config.labels) > 0, "labels can't beempty" # use len() to get list length -assert "env" in my_config.labels, "env label is a must" -assert my_config.cpu >= 256, "cpu cannot be less than256" -``` - -## 9. Config with Input Arguments - -Sometimes we need to get external input via parameters dynamically from the end user or platform. - -In this case, we can pass in `priority` and `env` on demand: - -- Pass in arguments: `-D priority=1 -D env=pre-prod` -- Get value by `option` keyword in KCL code - -```python -_priority = option("priority") # a non-exported and mutable variable -_env = option("env") # a non-exported and mutable variable -_cpu = 256 # a non-exported and mutable variable - -if _priority == 1: - _cpu = 256 -elif _priority == 2: - _cpu = 512 -elif _priority == 3: - _cpu = 1024 -else: - _cpu = 2048 - -_name = "nginx" -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = _env -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" -``` - -Run with KCL, we will see the generated data in yaml format as below: - -``` -kcl -D priority=2 -D env=pre-prod my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -``` - -## 10. 
Simplify Logic Expression using Dict - -When we need to write complex logic, we can use dict to simplify the writing of logic. - -```python -_priority = option("priority") # a non-exported and mutable variable -_env = option("env") # a non-exported and mutable variable -_priorityCpuMap = { - "1" = 256 - "2" = 512 - "3" = 1024 -} -# Using a dict to simplify logic and the default value is 2048 -_cpu = _priorityCpuMap[_priority] or 2048 -_name = "nginx" -# exported variables -cpu = _cpu -memory = _cpu * 2 -command = [_name] # a list -labels = { - run = "my-{}".format(_name) - env = _env -} # a dict -image = "{}:1.14.2".format(_name) # string format -service = "my-service" -``` - -Run with KCL, we will see the generated data in yaml format as below: - -KCL command: - -``` -kcl -D priority=2 -D env=pre-prod my_config.k -``` - -Stdout: - -```yaml -cpu: 512 -memory: 1024 -command: - - nginx -labels: - run: my-nginx - env: pre-prod -image: nginx:1.14.2 -service: my-service -``` - -## 11. The Final Step - -Congratulations! - -We have completed the first lesson about KCL, we have used KCL to replace our key-value text file to get better programming support. - -Please check schema codelab out now to learn how to write an advanced config collaboratively with KCL `schema` mechanism. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/_category_.json deleted file mode 100644 index 95062745..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Errors and Warnings", - "position": 4 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/_error.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/_error.md deleted file mode 100644 index 596a0139..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/_error.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "错误检查" -linkTitle: "错误检查" -type: "docs" -weight: 1 -description: KCL 语言规范 ---- -When errors happen, developers should be able to detect the error and abort -execution. Thus, KCL introduce the `assert` syntax. - -In the previous topic of `schema` syntax. Errors can also be raised when a -schema is violated. - -## Syntax - -The syntax of the `assert` statement is the following. - -``` -assert_stmt: 'assert' test [',' test] -``` - -In the basic form, an `assert` statement evaluates an expression. If the -expression is evaluated to `False`, the assertion is failed, and an error -should be reported. - -In the extended form, an error message can be provided. The error message is -another expression. It is only evaluated when the expression to be evaluated -is evaluated to `False`. The evaluation result of the error message is printed -when reporting the error. - -The following is an example: - -```py -a = 1 -b = 3 -# a != b evaluates to True, therefore no error should happen. -assert a != b -# a == b is False, in the reported error message, the message "SOS" should be printed. -assert a == b, "SOS" -``` - -## The Implementation - -When an error happens, no matter it is caused by the `assert` or the `schema` syntax, -the virtual machine should exit with an exit code greater than `0`. - -The virtual machine may choose to dump the back trace information, and it is strongly -recommended to implement it. 
- -In practice, KCLVM can dump back trace by default, and an argument can be introduced -to disable it. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/exception.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/exception.md deleted file mode 100644 index 7862f110..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/exception.md +++ /dev/null @@ -1,1559 +0,0 @@ ---- -title: "KCL Errors and Warnings" -linkTitle: "KCL Errors and Warnings" -type: "docs" -weight: 2 -description: KCL Errors and Warnings ---- -# KCL Errors and Warnings - -文档的此部分中的文章介绍了由 KCLVM 生成的诊断错误和警告消息。 - -**注意:** -**KCLVM 可以报告多种错误和警告。找到错误或警告后,KCLVM 可能会对代码意向作出假设并尝试继续,以便可以同时报告更多问题。 如果工具做出错误假设,则后续错误或警告可能不适应与当前 KCL 程序。 因此,纠正项目中的问题时,请先纠正第一个错误或警告,然后重新运行获取新的错误信息。 一个修补程序可能会导致后续错误消失。** - -此部分文档的主要内容包括: - -[KCL 语法错误 (E1xxx)](#11-kcl-%E8%AF%AD%E6%B3%95%E9%94%99%E8%AF%AF-e1xxx) : 如果 KCLVM 在当前 KCL 程序中发现了非法的 KCL 语法,KCLVM 就会停止运行并输出 KCL 程序语法错误的提示信息. - -[KCL 编译错误 (E2xxx)](#12-kcl-%E7%BC%96%E8%AF%91%E9%94%99%E8%AF%AF-e2xxx) : 如果 KCLVM 在一个不包含语法错误的 KCL 程序中发现了与 KCL 语义不符的代码,KCLVM 就会停止运行并输出编译错误的提示信息。 - -[KCL 运行时错误 (E3xxx)](#13-kcl-%E8%BF%90%E8%A1%8C%E6%97%B6%E9%94%99%E8%AF%AF-e3xxx) : KCL 程序通过编译后会生成 KCL 字节码,如果 KCLVM 在执行 KCL 字节码过程中出现错误,KCLVM 就会停止运行并输出运行时错误的提示信息. - -[KCL 编译警告 (W2xxx)](#14-kcl-%E7%BC%96%E8%AF%91%E8%AD%A6%E5%91%8A-w2xxx) : 当 KCLVM 发现可能导致运行失败的 KCL 代码,KCLVM 不会立即停止运行,但是会输出潜在错误的警告提示。 - -## 1.1 KCL 语法错误 (E1xxx) - -KCL 会出现的语法错误信息如下表所示: - -| ewcode | KCL exception | messages | -| ------ | ------------------------------------------------------------------- | ----------------------- | -| E1001 | [InvalidSyntaxError](#111-invalidsyntaxerror-e1001) | Invalid syntax | -| E1002 | [KCLTabError](#112-kcltaberror-e1002) | Tab Error | -| E1003 | [KCLIndentationError](#113-kclindentationerror-e1003) | Indentation Error | -| E1I37 | [IllegalArgumentSyntaxError](#114-illegalargumentsyntaxerror-e1i37) | Illegal argument syntax | - -### 1.1.1 InvalidSyntaxError [E1001] - -如果在运行 KCLVM 时遇到错误: - -- `InvalidSyntaxError`, 对应的 encode 为 `E1001` - -那么此时 KCL 程序中出现了 - -- 非法的 KCL 语法。 - -可能出现错误的 KCL 程序片段如下: - -``` -a, b = 1, 2 # 通过 “=” 赋值多个变量在KCL中是非法的。 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Syntax Error[E1001] : Invalid syntax ----> File /syntax_error/general/multiple_assign/case0/main.k:1:6 -1 |a = 1, 2 - 6 ^ -> Expected one of ['newline'] -Invalid syntax -``` - -### 1.1.2 KCLTabError [E1002] - -如果在运行 KCLVM 时遇到错误: - -- `KCLTabError`, 对应的 encode 为 `E1002` - -那么此时 KCL 程序中出现了 - -- Tab 与空格混用的问题。KCL 中禁止在代码缩进中混用 Tab 和空格。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - name: str # 通过tab表示缩进 - age: int # 通过四个空格标识缩进, - # 在当前运行环境中的四个空格与tab不同 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Syntax Error[E1002] : Tab Error ----> File /syntax_error/tab/tab_error_0/main.k:2:14 -2 | name: str - 14 ^ -> Failure -Inconsistent use of tabs and spaces in indentation -``` - -可以尝试以下步骤来修复这个错误: - -- 在 KCL 程序中全部使用 Tab 或者全部使用四个空格,不要混用。 - -### 1.1.3 KCLIndentationError [E1003] - -如果在运行 KCLVM 时遇到错误: - -- `KCLIndentationError`, 对应的 encode 为 `E1003` - -那么此时 KCL 程序中出现了 - -- 程序缩进错误。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - name: str # 使用一个tab或者四个空格表示缩进 - age: int # KCL不支持使用三个空格表示缩进 - info: str # KCL不支持使用两个空格表示缩进 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Syntax Error[E1003] : Indentation Error ----> File /syntax_error/indent/indent_error_0/main.k:2:14 -2 | name: str - 14 ^ -> Failure -Unindent 3 does not match any outer indentation level -``` - -可以尝试以下步骤来修复这个错误: - -- 在 KCL 程序中全部使用 Tab 或者全部使用四个空格来表示缩进。 - -### 1.1.4 IllegalArgumentSyntaxError [E1I37] - -如果在运行 KCLVM 时遇到错误: - -- `IllegalArgumentSyntaxError`, 对应的 encode 为 `E1I37` - -那么此时 KCL 程序中出现了 - -- 参数语法错误 - -可能出现错误的 KCL 程序片段如下: - -``` -# KCL中带有keyword的参数必须出现在不带有keyword参数后面 -# 带有keyword的参数: type="list", default={"key": "value"} -# 不带有keyword的参数: "key1" -a = option(type="list", default={"key": "value"}, "key1") -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Syntax Error[E1I37] : Illegal argument syntax ----> File /option/type_convert_fail_2/main.k:1:51 -1 |a = option(type="list", default={"key": "value"}, "key1") - 51 ^^^^^^ -> Failure -positional argument follows keyword argument -``` - -可以尝试以下步骤来修复这个错误: - -- KCL 中带有 keyword 的参数必须出现在不带有 keyword 参数后面, 参数正常顺序: - -``` -func(input_1, ..., input_n, - param_with_key_1 = input_with_key_1, ..., param_with_key_n = input_with_key_n) -``` - -## 1.2 KCL 编译错误 (E2xxx) - -KCL 会出现的编译错误信息如下表所示: - -| ewcode | KCL exception | messages | -| ------ | ----------------------------------------------------------------------- | --------------------------------------------------- | -| E2F04 | [CannotFindModule](#121-cannotfindmodule-e2f04) | Cannot find the module | -| E2F05 | [FailedLoadModule](#122-failedloadmodule-e2f05) | Failed to load module | -| E2H13 | [UnKnownDecoratorError](#123-unknowndecoratorerror-e2h13) | UnKnown decorator | -| E2H14 | [InvalidDecoratorTargetError](#124-invaliddecoratortargeterror-e2h14) | Invalid Decorator Target | -| E2C15 | [MixinNamingError](#125-mixinnamingerror-e2c15) | Illegal mixin naming | -| E2C16 | [MixinStructureIllegal](#126-mixinstructureillegal-e2c16) | Illegal mixin structure | -| E2B17 | [CannotAddMembersComplieError](#127-cannotaddmemberscomplieerror-e2b17) | Cannot add members to a schema | -| E2B20 | [IndexSignatureError](#128-indexsignatureerror-e2b20) | Invalid index signature | -| E2G22 | [TypeComplieError](#129-typecomplieerror-e2g22) | The type got is inconsistent with the type expected | -| E2L23 | [CompileError](#1210-compileerror-e2l23) | A complie error occurs during compiling | -| E2L25 | [KCLNameError](#1211-kclnameerror-e2l25) | Name Error | -| E2L26 | [KCLValueError](#1212-kclvalueerror-e2l26) | Value Error | -| E2L27 | [KCLKeyError](#1213-kclkeyerror-e2l27) | Key Error | -| E2L28 | [UniqueKeyError](#1214-uniquekeyerror-e2l28) | Unique key error | -| E2A29 | [KCLAttributeComplieError](#1215-kclattributecomplieerror-e2a29) | Attribute error occurs during compiling | -| E2D32 | [MultiInheritError](#1216-multiinheriterror-e2d32) | Multiple inheritance is illegal | -| E2D34 | [IllegalInheritError](#1217-illegalinheriterror-e2d34) | Illegal inheritance | -| E2I36 | [IllegalArgumentComplieError](#1218-illegalargumentcomplieerror-e2i36) | Illegal argument during compiling | -| E3L41 | [ImmutableCompileError](#1219-immutablecompileerror-e3l41) | Immutable variable is modified | - -### 1.2.1 CannotFindModule [E2F04] - -如果在运行 KCLVM 时遇到错误: - -- `CannotFindModule`, 对应的 encode 为 `E2F04` - -那么此时 KCL 程序中出现了 - -- 无法找到导入模块错误 - -可能出现错误的 KCL 程序片段如下: - -``` -import .some0.pkg1 as some00 # some0 not found in package - -Name1 = some00.Name # some0.pkg1.name -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Complier Error[E2F04] : Cannot find the module ----> File import_abs_fail_0/app-main/main.k:1:1 -1 |import .some0.pkg1 as some00 # some0 not found in app-main package - 1 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -> Failure -Cannot find the module .some0.pkg1 from import_abs_fail_0/app-main/some0/pkg1 -``` - -可以尝试以下步骤来修复这个错误: - -- 在 imoprt 路径下添加导入模块文件。 - -### 1.2.2 FailedLoadModule [E2F05] - -如果在运行 KCLVM 时遇到错误: - -- `FailedLoadModule`, 对应的 encode 为 `E2F05` - -那么此时 KCL 程序中出现了 - -- 导入模块加载错误 - -可以尝试以下步骤来修复这个错误: - -- 查看文件是否可读 -- 查看文件是否为 kcl 文件 - -### 1.2.3 UnKnownDecoratorError [E2H13] - -如果在运行 KCLVM 时遇到错误: - -- `UnKnownDecoratorError`, 对应的 encode 为 `E2H13` - -那么此时 KCL 程序中出现了 - -- 未知的装饰器错误 - -可能出现错误的 KCL 程序片段如下: - -``` -@err_deprecated # 这是一个非法的装饰器 -schema Person: - firstName: str = "John" - lastName: str - name: str - -JohnDoe = Person { - name: "deprecated" -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2H13] : UnKnown decorator ----> File deprecated/unknown_fail_1/main.k:1:2 -1 |@err_deprecated - 2 ^ -> Failure -UnKnown decorator err_deprecated -``` - -可以尝试以下步骤来修复这个错误: - -- 检查装饰器是否存在。 - -### 1.2.4 InvalidDecoratorTargetError [E2H14] - -如果在运行 KCLVM 时遇到错误: - -- `InvalidDecoratorTargetError`, 对应的 encode 为 `E2H14` - -那么此时 KCL 程序中出现了 - -- 无效的装饰器目标错误。 - -可以尝试以下步骤来修复这个错误: - -- 检查使用装饰器的 KCL 代码是否出现异常。 - -### 1.2.5 MixinNamingError [E2C15] - -如果在运行 KCLVM 时遇到错误: - -- `MixinNamingError`, 对应的 encode 为 `E2C15` - -那么此时 KCL 程序中出现了 - -- Mixin 命名错误。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - firstName: str - lastName: str - fullName: str - -schema Fullname: # Mixin的名称应该以Mixin结尾 - fullName = "{} {}".format(firstName, lastName) - -schema Scholar(Person): - mixin [Fullname] - school: str - -JohnDoe = Scholar { - "firstName": "John", - "lastName": "Doe", - "fullName": "Doe Jon" -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2C15] : Illegal mixin naming ----> File mixin/invalid_name_failure/main.k:10:12 -10 | mixin [Fullname] - 12 ^ -> Failure -a valid mixin name should end with 'Mixin', got 'Fullname' -``` - -可以尝试以下步骤来修复这个错误: - -- 如果 schema 是一个 mixin,那么这个 schema 的名称应该以 Mixin 结尾。 - -### 1.2.6 MixinStructureIllegal [E2C16] - -如果在运行 KCLVM 时遇到错误: - -- `MixinStructureIllegal`, 对应的 encode 为 `E2C16` - -那么此时 KCL 程序中出现了 - -- Mixin 结构错误。 - -可以尝试以下步骤来修复这个错误: - -- 检查作为 Mixin 的 Schema 的结构。 - -### 1.2.7 CannotAddMembersComplieError [E2B17] - -如果在运行 KCLVM 时遇到错误: - -- `CannotAddMembersComplieError`, 对应的 encode 为 `E2B17` - -那么此时 KCL 程序中出现了 - -- 使用 Schema 中不存在的成员。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Girl: - gender: str = "female" - -alice = Girl { - "first": "alice", # Schema中没有成员“first” - "last": " Green", # Schema中没有成员“last” - "age": 10 # Schema中没有成员“age” -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2B18] : Cannot add members to a schema ----> File /invalid/add_attribute/main.k:9:9 -9 |alice = Girl { - 9 ^ -> Failure -first,last,age: No such member in the schema -``` - -可以尝试以下步骤来修复这个错误: - -- 为 Schema 添加缺少的成员。 -- 不要使用 Schema 中不存在的成员。 - -### 1.2.8 IndexSignatureError [E2B20] - -如果在运行 KCLVM 时遇到错误: - -- `IndexSignatureError`, 对应的 encode 为 `E2B20` - -那么此时 KCL 程序中出现了 - -1. 在一个 schema 中使用多个索引签名。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Data: - [str]: str - [str]: int # 在同一个schema中使用了多个索引签名 - -data = Data { - name: "test" -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Complier Error[E2B20] : Invalid index signature ----> File index_signature/fail_1/main.k:3:5 -3 | [str]: int - 5 ^^^^^^^^^^ -> Failure -only one index signature is allowed in the schema -``` - -可以尝试以下步骤来修复这个错误: - -- - 删除多余的索引签名。 - -2. schema 中索引签名的名称与 schema 中其他属性的名称存在同名冲突。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Data: - name: str # name - [name: str]: str # 已有名称为name的schema属性 - -data = Data { - name: "test" -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2B20] : Invalid index signature ----> File index_signature/fail_2/main.k:3:5 -3 | [name: str]: str - 5 ^ -> Failure -index signature attribute name 'name' cannot have the same name as schema attributes -``` - -可以尝试以下步骤来修复这个错误: - -- - 删除 schema 中出现同名冲突的属性或者索引签名,或者为它们更换不同的名称。 - -3. schema 索引签名的类型与 schema 实例化的属性类型冲突。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Data: - [str]: int - -data = Data { - name: "test" # 索引签名为 [str]:int, "test"的类型不是int. -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File index_signature/fail_3/main.k:4:8 -4 |data = Data { - 8 ^ -> Failure -expected schema index signature value type int, got str(test) of the key 'name' -``` - -可以尝试以下步骤来修复这个错误: - -- - 检查 schema 索引签名的类型与 schema 实例中的属性类型是否一致。 - -4. Schema 中的属性与索引签名冲突 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Data: - count: int # int 和 str 冲突 - [str]: str - -data = Data { - count: 1 -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2B20] : Invalid index signature ----> File index_signature/fail_4/main.k:2:5 -2 | count: int - 5 ^ -> Failure -the type 'int' of schema attribute 'count' does not meet the index signature definition [str]: str -``` - -可以尝试以下步骤来修复这个错误: - -- - 调整 Schema 属性或者调整索引签名。 - -### 1.2.9 TypeComplieError [E2G22] - -如果在运行 KCLVM 时遇到错误: - -- `TypeComplieError`, 对应的 encode 为 `E2G22` - -那么此时 KCL 程序中出现了 - -- 静态类型检查错误。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - firstName: str - lastName: int - -JohnDoe = Person { - "firstName": "John", - "lastName": "Doe" # Schema中定义lastName: int -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2G22] : The type got is inconsistent with the type expected ----> File type/type_fail_0/main.k:7:5 -7 | "lastName": "Doe" - 5 ^ -> Failure -expect int, got str(Doe) -``` - -可以尝试以下步骤来修复这个错误: - -- 检查赋给某个变量的值的类型与这个变量的类型是否一致。 - -### 1.2.10 CompileError [E2L23] - -如果在运行 KCLVM 时遇到错误: - -- `CompileError`, 对应的 encode 为 `E2L23` - -那么此时 KCL 程序中出现了 - -1. 不支持的类型合并 - -可能出现错误的 KCL 程序片段如下: - -``` -_data = [1, 2, 3] -_data |= "value" -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File union/fail/fail_1/main.k:2 -2 |_data |= "value" -> Failure -unsupported operand type(s) for |=: '[int]' and 'str(value)' -``` - -1. 不支持的操作符类型 - -可能出现错误的 KCL 程序片段如下: - -``` -a = None -b = 1 + None # KCL中不支持None和int之间进行+操作 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File operator/operator_fail_0/main.k:2 -2 |b = 1 + None -> Failure -unsupported operand type(s) for +: 'int(1)' and 'NoneType' -``` - -可以尝试以下步骤来修复这个错误: - -- - 调整操作符号,使其同时支持两个操作数的类型。 -- - 调整操作数,使其同时符合操作符号的约束。 - -1. 没有定义的变量 - -可能出现错误的 KCL 程序片段如下: - -``` -a = 1 -b = "${c + 1}" # 'c' 没有定义 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File var_not_define_fail_0/main.k:2:8 -2 |b = "${c + 1}" - 8 ^ -> Failure -name 'c' is not defined -``` - -可以尝试以下步骤来修复这个错误: - -- - 对未定义的变量进行定义。 -- - 在表达式中去掉对未定义变量的操作。 - -4. 无效的赋值表达式 - -可能出现错误的 KCL 程序片段如下: - -``` -# pkg.k -a = 1 - -# main.k -import pkg -pkg.a |= 2 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File pkg_inplace_modify_1/main.k:3:1 -3 |pkg.a |= 2 - 1 ^^^^^ -> Failure -module 'pkg' can't be assigned -``` - -可以尝试以下步骤来修复这个错误: - -- - 检查赋值表达式的内容。 - -1. 无效的字符串表达式 - -可能出现错误的 KCL 程序片段如下: - -``` -a = 1 -b = "${b = a + 1}" # Invalid string interpolation expression -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File invalid_format_value_fail_0/main.k:2:5 -2 |b = "${b = a + 1}" - 5 ^^^^^^^^^^^^^^ -> Failure -invalid string interpolation expression 'b = a + 1' -``` - -可以尝试以下步骤来修复这个错误: - -- - 检查字符串表达式的内容。 - -1. 无效的循环变量 - -可能出现错误的 KCL 程序片段如下: - -``` -data = {"key1": "value1", "key2": "value2"} -dataLoop = [i for i, j, k in data] # the number of loop variables can only be 1 or 2 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2L23] : A complie error occurs during compiling ----> File dict/invalid_loop_var_fail_0/main.k:2:19 -2 |dataLoop = [i for i, j, k in data] # error - 19 ^^^^^^^ -> Failure -the number of loop variables is 3, which can only be 1 or 2 -``` - -### 1.2.11 KCLNameError [E2L25] - -如果在运行 KCLVM 时遇到错误: - -- `KCLNameError`, 对应的 encode 为 `E2L25` - -那么此时 KCL 程序中出现了 - -- 试图访问的变量名不存在 - -可以尝试以下步骤来修复这个错误: - -- 检查报错信息中出现的变量是否存在。 - -### 1.2.12 KCLValueError [E2L26] - -如果在运行 KCLVM 时遇到错误: - -- `KCLValueError`, 对应的 encode 为 `E2L26` - -那么此时 KCL 程序中出现了 - -- 值错误,传给参数的类型不正确 - -可以尝试以下步骤来修复这个错误: - -- 检查参数的具体类型。 - -### 1.2.13 KCLKeyError [E2L27] - -如果在运行 KCLVM 时遇到错误: - -- `KCLKeyError`, 对应的 encode 为 `E2L27` - -那么此时 KCL 程序中出现了 - -- 使用了 dict 中不存在的 key 时引发的 key 错误 - -可以尝试以下步骤来修复这个错误: - -- 检查字典中是否存在 key。 - -### 1.2.14 UniqueKeyError [E2L28] - -如果在运行 KCLVM 时遇到错误: - -- `UniqueKeyError`, 对应的 encode 为 `E2L28` - -那么此时 KCL 程序中出现了 - -- 变量同名或重复定义。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - name: str = "kcl" - age: int = 1 - -schema Person: - aa: int - -x0 = Person{} -x1 = Person{age:101} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2L28] : Unique key error ----> File /schema/same_name/main.k:5:1 -5 |schema Person: - 1 ^ -> Failure -Variable name 'Person' must be unique in package context -``` - -可以尝试以下步骤来修复这个错误: - -- 检查出现错误的名称是否已经被使用。 - -### 1.2.15 KCLAttributeComplieError [E2A29] - -如果在运行 KCLVM 时遇到错误: - -- `KCLAttributeComplieError`, 对应的 encode 为 `E2A29` - -那么此时 KCL 程序中出现了 - -- Schema 的属性错误。 - -可能出现错误的 KCL 程序片段如下: - -``` -# pkg -schema A: - field_A: str - -# main -import pkg as p - -a = p.D + 1 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Complier Error[E2A29] : Attribute error occurs during compiling ----> File /import/module/no_module_attr_fail_0/main.k:3:5 -3 |a = p.D + 1 - 5 ^ -> Failure -module 'pkg' has no attribute 'D' -``` - -可以尝试以下步骤来修复这个错误: - -- 在使用 Schema 属性时检查这个属性是否存在。 - -### 1.2.16 MultiInheritError [E2D32] - -如果在运行 KCLVM 时遇到错误: - -- `MultiInheritError`, 对应的 encode 为 `E2D32` - -那么此时 KCL 程序中出现了 - -- 多继承错误。 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - firstName: str - lastName: str - -schema KnowledgeMixin: - firstName: int - subject: str - -schema Scholar(KnowledgeMixin, Person): # KCL中不支持多继承 - school: str -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2D32] : Multiple inheritance is illegal ----> File /schema/inherit/multi_inherit_fail_1/main.k:9:16 -9 |schema Scholar(KnowledgeMixin, Person): - 16 ^^^^^^^^^^^^^^^^^^^^^^ -> Failure -Multiple inheritance of Scholar is prohibited -``` - -可以尝试以下步骤来修复这个错误: - -- 检查程序的继承结构,KCL 中不支持多继承。 - -### 1.2.17 IllegalInheritError [E2D34] - -如果在运行 KCLVM 时遇到错误: - -- `IllegalInheritError`, 对应的 encode 为 `E2D34` - -那么此时 KCL 程序中出现了 - -- 不合法的继承结构 - -可能出现错误的 KCL 程序片段如下: - -``` -schema FullnameMixin: - fullName = "{} {}".format(firstName, lastName) - -schema Scholar(FullnameMixin): # KCL中不支持Schema继承Mixin - school: str -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2D34] : Illegal inheritance ----> File /schema/inherit/inherit_mixin_fail/main.k:8:1 -8 |schema Scholar(FullnameMixin): - 1 ^ -> Failure -mixin inheritance FullnameMixin is prohibited -``` - -可以尝试以下步骤来修复这个错误: - -- KCL 中 Schema 支持单继承 Schema。 - -### 1.2.18 IllegalArgumentComplieError [E2I36] - -如果在运行 KCLVM 时遇到错误: - -- `IllegalArgumentComplieError`, 对应的 encode 为 `E2I36` - -那么此时 KCL 程序中出现了 - -- 参数错误 - -可能出现错误的 KCL 程序片段如下: - -``` -a = option("key") - -# kcl main.k -D key=value= -# key=value= is an illegal expression -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Complier Error[E2I36] : Illegal argument during compiling -'key=value=' -``` - -可以尝试以下步骤来修复这个错误: - -- 检查通过命令设置的 KCL option 参数是否为合法参数。 - -### 1.2.19 ImmutableCompileError [E3L41] - -如果在运行 KCLVM 时遇到错误: - -- `ImmutableCompileError`, 对应的 encode 为 `E3L41` - -那么此时 KCL 程序中出现了 - -- 不可变量的值发生改变 - -可能出现错误的 KCL 程序片段如下: - -``` -a = 2147483646 -a += 1 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Compile Error[E2L41] : Immutable variable is modified ----> File /range_check_int/augment_assign/main.k:2:1 -2 |a += 1 - 1 ^ -> Failure -Immutable variable is modified -``` - -可以尝试以下步骤来修复这个错误: - -- 将被改变的不可变量设置为私有或者去掉对不可变量值的改动。 - -## 1.3 KCL 运行时错误 (E3xxx) - -KCL 会出现的运行时错误信息如下表所示: - -| ewcode | KCL exception | messages | -| ------ | ----------------------------------------------------------------------- | --------------------------------------------------- | -| E3F06 | [RecursiveLoad](#131-recursiveload-e3f06) | Recursively loading module | -| E3K04 | [FloatOverflow](#132-floatoverflow-e3k04) | Float overflow | -| E3K09 | [IntOverflow](#133-intoverflow-e3k09) | Integer overflow | -| E3N11 | [DeprecatedError](#134-deprecatederror-e3n11) | Deprecated error | -| E3A30 | [KCLAttributeRuntimeError](#135-kclattributeruntimeerror-e3a30) | Attribute error occurs at runtime | -| E3G21 | [TypeRuntimeError](#136-typeruntimeerror-e3g21) | The type got is inconsistent with the type expected | -| E3B17 | [SchemaCheckFailure](#137-schemacheckfailure-e3b17) | Schema check is failed to check condition | -| E3B19 | [CannotAddMembersRuntimeError](#138-cannotaddmembersruntimeerror-e3b19) | Cannot add members to a schema | -| E3M38 | [EvaluationError](#139-evaluationerror-e3m38) | Evaluation failure | -| E3M39 | [InvalidFormatSpec](#1310-invalidformatspec-e3m39) | Invalid format specification | -| E3M40 | [KCLAssertionError](#1311-kclassertionerror-e3m40) | Assertion failure | -| E3M44 | [ImmutableRuntimeError](#1312-immutableruntimeerror-e3m44) | Immutable variable is modified | -| E2D33 | [CycleInheritError](#1313-cycleinheriterror-e2d33) | Cycle Inheritance is illegal | -| E3M42 | [KCLRecursionError](#1314-kclrecursionerror-e3m42) | Recursively reference | - -### 1.3.1 RecursiveLoad [E3F06] - -如果在运行 KCLVM 时遇到错误: - -- `RecursiveLoad`, 对应的 encode 为 `E3F06` - -那么此时 KCL 程序中出现了 - -- 循环导入错误 - -可能出现错误的 KCL 程序片段如下: - -``` -# module.k -import main # module.k 导入了 main.k - -print('module') - -# main.k -import module # main.k 导入了 module.k - -print('main') -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3F06] : Recursively loading module ----> File /import/recursive_import_fail/main.k:4 -4 | -> Failure -In module module, recursively loading modules: module, main -``` - -可以尝试以下步骤来修复这个错误: - -- 检查包的导入部分是否存在循环导入的问题。 - -### 1.3.2 FloatOverflow [E3K04] - -如果在运行 KCLVM 时遇到错误: - -- `FloatOverflow`, 对应的 encode 为 `E3K04` - -那么此时 KCL 程序中出现了 - -- 浮点数溢出 - -可能出现错误的 KCL 程序片段如下: - -``` -uplimit = 3.402823466e+38 -epsilon = 2.220446049250313e-16 -a = uplimit * (1 + epsilon) - -# kcl main.k -r -d -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3K07] : Float overflow ----> File /range_check_float/overflow/number_0/main.k:6 -6 |a = uplimit * (1 + epsilon) -> Failure -3.402823466000001e+38: A 32-bit floating point number overflow -``` - -可以尝试以下步骤来修复这个错误: - -- 检查浮点数的值是否在 KCL 支持的数字范围内。 - -### 1.3.3 IntOverflow [E3K09] - -如果在运行 KCLVM 时遇到错误: - -- `IntOverflow`, 对应的 encode 为 `E3K09` - -那么此时 KCL 程序中出现了 - -- 整数溢出 - -可能出现错误的 KCL 程序片段如下: - -``` -_a = 9223372036854775807 -_a += 1 - -# kcl test.k -d -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Runtime Error[E3K09] : Integer overflow ----> File /range_check_int/augment_assign_fail_1/main.k:2 -2 |_a += 1 -> Failure -9223372036854775808: A 64 bit integer overflow -``` - -可以尝试以下步骤来修复这个错误: - -- 检查整数的值是否在 KCL 支持的数字范围内。 - -### 1.3.4 DeprecatedError [E3N11] - -如果在运行 KCLVM 时遇到错误: - -- `DeprecatedError`, 对应的 encode 为 `E3N11` - -那么此时 KCL 程序中出现了 - -- 使用废弃代码 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - firstName: str = "John" - lastName: str - @deprecated(version="1.16", reason="use firstName and lastName instead", strict=True) - name: str - -JohnDoe = Person { - name: "deprecated" # name已经被过时,并且strict设置为True -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3N11] : Deprecated error ----> File /schema/deprecated/member_standard_1/main.k:7 -7 |JohnDoe = Person { -> Failure -name was deprecated since version 1.16, use firstName and lastName instead -``` - -可以尝试以下步骤来修复这个错误: - -- strict 设置为 True 时无法使用过时的代码,可以将 strict 设置为 False,将不会出现错误,而是输出一个警告。 -- 调整代码,不使用已经过时的代码。 - -### 1.3.5 KCLAttributeRuntimeError [E3A30] - -如果在运行 KCLVM 时遇到错误: - -- `KCLAttributeRuntimeError`, 对应的 encode 为 `E3A30` - -那么此时 KCL 程序中出现了 - -- 属性错误。 - -可能出现错误的 KCL 程序片段如下: - -``` -import math - -a = math.err_func(1) # err_func is not found in math -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3A30] : Attribute error occurs at runtime ----> File /import/module/no_module_attr_fail_2/main.k:3 -3 |a = math.err_func(1) -> Failure -module 'math' has no attribute 'err_func' -``` - -可以尝试以下步骤来修复这个错误: - -- 检查属性调用是否正确。 - -### 1.3.6 TypeRuntimeError [E3G21] - -如果在运行 KCLVM 时遇到错误: - -- `TypeRuntimeError`, 对应的 encode 为 `E3G21` - -那么此时 KCL 程序中出现了 - -- 类型检查错误 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - name: str = "Alice" - -_personA = Person {} -_personA |= {"name": 123.0} # name: str = "Alice" -personA = _personA -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3G21] : The type got is inconsistent with the type expected ----> File /fail/fail_4/main.k:5 -5 |_personA |= {"name": 123.0} -> Failure -expect str, got float -``` - -可以尝试以下步骤来修复这个错误: - -- 停止错误的类型合并或者将类型调整为 KCL 支持的类型合并。 - -### 1.3.7 SchemaCheckFailure [E3B17] - -如果在运行 KCLVM 时遇到错误: - -- `SchemaCheckFailure`, 对应的 encode 为 `E3B17` - -那么此时 KCL 程序中出现了 - -- Schema 中的 check 条件冲突 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - lastName: str - age: int - check: - age < 140, "age is too large" - -JohnDoe = Person { - "lastName": "Doe", - "age": 1000 # Schema中的check条件为: age < 140 -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3B17] : Schema check is failed to check condition ----> File /check_block/check_block_fail_1/main.k:9:11 -9 |JohnDoe = Person { - 11 ^ -> Check failed in the schema ----> File /check_block/check_block_fail_1/main.k:7 -7 | age < 140, "age is too large" -> Check failed on the condition -age is too large -``` - -可以尝试以下步骤来修复这个错误: - -- 检查 Schema 的属性与 check 中的条件是否符合 - -### 1.3.8 CannotAddMembersRuntimeError [E3B19] - -如果在运行 KCLVM 时遇到错误: - -- `CannotAddMembersRuntimeError`, 对应的 encode 为 `E3B19` - -那么此时 KCL 程序中出现了 - -- 访问 Schema 中不存在的成员 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Name: - name: str - -schema Person: - name: Name - -person = Person { - name.err_name: "Alice" # err_name is not found in schema Name -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
- -``` -KCL Runtime Error[E3B19] : Cannot add members to a schema ----> File /nest_var/nest_var_fail_1/main.k:8:5 -8 | name.err_name: "Alice" - 5 ^ -> Failure -err_name: No such member in the schema -``` - -可以尝试以下步骤来修复这个错误: - -- 为 Schema 添加不存在的成员。 -- 访问 Schema 中存在的成员。 - -### 1.3.9 EvaluationError [E3M38] - -如果在运行 KCLVM 时遇到错误: - -- `EvaluationError`, 对应的 encode 为 `E3M38` - -那么此时 KCL 程序中出现了 - -- 当 KCL 中数值计算过程出现了错误。 - -可能出现错误的 KCL 程序片段如下: - -``` -_list1 = [1, 2, 3] # _list1 is a variable, and its type can only be known at runtime -_list2 = None # _list1 is a variable, and its type can only be known at runtime - -result2 = _list1 + _list2 # list + NoneType is illegal -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3M38] : Evaluation failure ----> File /datatype/list/add_None_fail/main.k:4 -4 |result2 = _list1 + _list2 -> Failure -can only concatenate list (not "NoneType") to list -``` - -可以尝试以下步骤来修复这个错误: - -- 检查表达式中是否存在变量为 None,或者非法的计算过程。 - -### 1.3.10 InvalidFormatSpec [E3M39] - -如果在运行 KCLVM 时遇到错误: - -- `InvalidFormatSpec`, 对应的 encode 为 `E3M39` - -那么此时 KCL 程序中出现了 - -- 非法的字符串格式 - -可能出现错误的 KCL 程序片段如下: - -``` -a = 1 -b = 1 -data = "${a: #js}" + " $$ " # KCL插值字符串中,#js是非法的 -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3M39] : Invalid format specification ----> File /datatype/str_interpolation/invalid_format_spec_fail_0/main.k:3 -3 |data = "${a: #js}" + " $$ " -> Failure -#js is invalid format spec -``` - -可以尝试以下步骤来修复这个错误: - -- 将非法 String 调整为 KCL 标准支持的 String。 - -### 1.3.11 KCLAssertionError [E3M40] - -如果在运行 KCLVM 时遇到错误: - -- `KCLAssertionError`, 对应的 encode 为 `E3M40` - -那么此时 KCL 程序中出现了 - -- Assert False - -可能出现错误的 KCL 程序片段如下: - -``` -assert False -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3M40] : Assertion failure ----> File /assert/invalid/fail_0/main.k:1 -1 |assert False -> Failure -Assertion failure -``` - -可以尝试以下步骤来修复这个错误: - -- 检查 Assert 的条件,Assert 条件为 False 时,就会出现此类错误,去掉 Assert 语句或改变条件为 True。 - -### 1.3.12 ImmutableRuntimeError [E3M44] - -如果在运行 KCLVM 时遇到错误: - -- `ImmutableRuntimeError`, 对应的 encode 为 `E3M44` - -那么此时 KCL 程序中出现了 - -- 不可变量的值发生改变 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Person: - final firstName : str - lastName : str - - -schema Scholar(Person): - firstName = "CBA" - - -scholar = Scholar { - "firstName": "ABC" # firstName in schema Person is final. -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. - -``` -KCL Runtime Error[E3M41] : Immutable variable is modified ----> File /final/fail_lazy_init_0/main.k:12:5 -12 | "firstName": "ABC" - 5 ^ -> Failure -final schema field 'firstName' -``` - -可以尝试以下步骤来修复这个错误: - -- 检查 final 修饰的不可变量是否出现了赋值等改变值的操作。 - -### 1.3.13 CycleInheritError [E2D33] - -如果在运行 KCLVM 时遇到错误: - -- `CycleInheritError`, 对应的 encode 为 `E2D33` - -那么此时 KCL 程序中出现了 - -- 循环继承 - -可能出现错误的 KCL 程序片段如下: - -``` -schema Parent(Son): - parent_field: str - -schema Son(GrandSon): - son_field: str - -schema GrandSon(Parent): - grandson_field: str - -parent = Parent { - parent_field: "" -} -``` - -KCLVM 在运行上述 KCL 程序片段时的输出信息如下. 
-
-```
-KCL Complier Error[E2D33] : Cycle Inheritance is illegal
----> File /inherit/cycle_inherit_fail_1/main.k:7:1
-7 |schema GrandSon(Parent):
- 1 ^
--> Failure
-GrandSon and Parent
-```
-
-可以尝试以下步骤来修复这个错误:
-
-- 检查 Schema 的继承关系,避免出现 A 继承 B,B 继承 A 的情况。
-
-### 1.3.14 KCLRecursionError [E3M42]
-
-如果在运行 KCLVM 时遇到错误:
-
-- `KCLRecursionError`, 对应的 encode 为 `E3M42`
-
-那么此时 KCL 程序中出现了
-
-- 循环引用
-
-可能出现错误的 KCL 程序片段如下:
-
-```
-schema Parent(Son):
-    parent_field: str
-    son: Son = Son { # Parent has attribute Son
-        parent: Parent {
-            parent_field: "123"
-        }
-    }
-
-schema Son:
-    son_field: str
-    parent: Parent = Parent { # Son has attribute Parent
-        son: Son {
-            son_field: "123"
-        }
-    }
-
-parent = Parent {
-    parent_field: "",
-}
-```
-
-KCLVM 在运行上述 KCL 程序片段时的输出信息如下.
-
-```
-KCL Runtime Error[E3M42] : Recursively reference
----> File /init/init_cycle_fail_0/main.k:10
-10 | son_field: str
--> Failure
-maximum recursion depth exceeded in __instancecheck__
-```
-
-可以尝试以下步骤来修复这个错误:
-
-- 检查 Schema 中的属性成员,避免出现循环引用的问题。
-
-## 1.4 KCL 编译警告 (W2xxx)
-
-KCL 中的编译警告如下表所示:
-
-| ewcode | KCL exception                                     | messages           |
-| ------ | ------------------------------------------------- | ------------------ |
-| W2K08  | [FloatUnderflow](#141-floatunderflow-w2k08)       | Float underflow    |
-| W2P10  | [InvalidDocstring](#142-invaliddocstring-w2p10)   | Invalid docstring  |
-| W2N12  | [DeprecatedWarning](#143-deprecatedwarning-w2n12) | Deprecated warning |
-
-### 1.4.1 FloatUnderflow [W2K08]
-
-如果在运行 KCLVM 时遇到错误:
-
-- `FloatUnderflow`, 对应的 encode 为 `W2K08`
-
-那么此时 KCL 程序中出现了
-
-- 浮点数下溢
-
-可能出现错误的 KCL 程序片段如下:
-
-```
-downlimit = 1.175494351e-38
-uplimit = 3.402823466e+38
-
-epsilon = 2.220446049250313e-16
-
-a = uplimit / (1 + epsilon)
-b = downlimit / (1 + epsilon)
-
-# kcl main.k -r -d
-```
-
-KCLVM 在运行上述 KCL 程序片段时的输出信息如下.
-
-```
-KCL Complier Warning[W2K08] : Float underflow
----> File /range_check_float/underflow/number_0/main.k:7
-7 |b = downlimit / (1 + epsilon)
--> Failure
-1.1754943509999997e-38: A 32-bit floating point number underflow
-```
-
-可以尝试以下步骤来修复这个错误:
-
-- 检查浮点数的值是否在 KCL 支持的数字范围内。
-
-### 1.4.2 InvalidDocstring [W2P10]
-
-如果在运行 KCLVM 时遇到错误:
-
-- `InvalidDocstring`, 对应的 encode 为 `W2P10`
-
-那么此时 KCL 程序中出现了
-
-- 无效的 doc 内容
-
-可以尝试以下步骤来修复这个错误:
-
-- 请按照 KCL 标准编写 doc。
-
-### 1.4.3 DeprecatedWarning [W2N12]
-
-如果在运行 KCLVM 时遇到错误:
-
-- `DeprecatedWarning`, 对应的 encode 为 `W2N12`
-
-那么此时 KCL 程序中出现了
-
-- 过时的代码警告
-
-可能出现错误的 KCL 程序片段如下:
-
-```
-schema Person:
-    firstName?: str = "John"
-    lastName?: str
-    @deprecated(version="1.16", reason="use firstName and lastName instead", strict=False)
-    name?: str
-
-JohnDoe = Person {
-    name: "deprecated" # name is deprecated and strict is False
-}
-```
-
-KCLVM 在运行上述 KCL 程序片段时的输出信息如下.
- -``` -KCL Compile Warning[W2N12] : Deprecated warning -name was deprecated since version 1.16, use firstName and lastName instead -``` - -可以尝试以下步骤来修复这个错误: - -- 尽量不要使用已经过时的代码。如果将 strict 设置为 True,KCLVM 将会输出错误,并停止运行。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/index.md deleted file mode 100644 index 15fbda6f..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/error/index.md +++ /dev/null @@ -1 +0,0 @@ -# Errors and Warnings diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/index.md deleted file mode 100644 index 66fd3dde..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/index.md +++ /dev/null @@ -1 +0,0 @@ -# KCL diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/_category_.json deleted file mode 100644 index 7b24faae..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Spec", - "position": 3 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/codestyle.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/codestyle.md deleted file mode 100644 index 0267a73c..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/codestyle.md +++ /dev/null @@ -1,623 +0,0 @@ ---- -title: "Code Style" -linkTitle: "Code Style" -type: "docs" -weight: 2 -description: Code Style ---- -## Introduction - -This document gives the KCL code style conventions. Good code style can play a vital role in the development and maintenance of the project. We can learn the KCL code style by referring to the full text of the description and sample codes, and use KCL format and lint tools to help coding. - -## Source File Encoding - -KCL file encoding should always use **UTF-8**. - -## Code Layout - -### Indentation - -Use **4 spaces** per indentation level such as in the schema statement and if statement. - -```python -schema PersonA: - name: str # non-recommended - age: int - -schema PersonB: - name: str # recommended - age: int - -if True: - a = 1 # recommended -elif True: - b = 2 # non-recommended -else: - c = 3 # non-recommended -``` - -The closing brace/bracket/parenthesis on multiline constructs should line up under **first character** of the line that starts the multiline construct, as in: - -```python -# valid and recommended -my_list = [ - 1, 2, 3, - 4, 5, 6, -] -``` - -```python -# invalid -my_list = [ - 1, 2, 3, - 4, 5, 6, - ] -``` - -### Tabs or Spaces - -- Spaces are the preferred indentation method. -- Tabs should be used solely to remain consistent with code that is already indented with tabs. - -KCL disallows mixing the use of tabs and spaces for indentation and an error will be reported during the compile time. - -### Blank Lines - -- Surround top-level schema definitions with one blank line. -- Keep at most one blank line between two statements and remove redundant blank lines. -- Remove extra blank characters at the end of the line -- Remove extra blank characters in a blank line. -- There is no blank line in the header of the file, start writing from the first line. 
-- Only one blank line will be left at the end of the KCL file. - -```python -# Remove blank lines in the file header -a = 1 # Remove white space at the end of the line -# Keep at most one blank line between two statements - -b = 2 -# Only leave one blank line at the end of the file - -``` - -### Inline Expressions - -Write indentation of KCL `if`, `elif`, `else` and other conditions on different lines. - -```python -if True: print("") # non-recommended - -if True: # recommended - print("") -``` - -### Line Break and Continuation lines - -- For long expressions, use the line continuation symbol `\` and keep the left end of multiple expressions aligned. -- The 4-space rule is optional for continuation lines. - -```python -anotherString = "Too long expression " + \ - "Too long expression " # non-recommended - -longString = "Too long expression " + \ - "Too long expression " + \ - "Too long expression " # recommended -``` - -### When to Use Trailing Commas - -- Always use trailing commas. - -### Maximum Line Length - -- The general recommendation is **80 characters** but not absolute. - -### Symbol Break White Space - -Try to keep the spaces between different symbols, but not too many, usually one is good. - -```python -a = 1 # recommended -b = 1 + 2 # non-recommended -``` - -### Whitespace in Expressions and Statements - -Avoid extraneous whitespace in the following situations: - -- The parentheses `()`, brackets `[]` and braces `{}` in the expression have no spaces inside. - -```python -a = (1 + 2) # recommended -b = ( 1 + 2 ) # non-recommended - -c = [1, 2, 3] # recommended -d = [ 1, 2, 3 ] # non-recommended - -e = {key = "value"} # recommended -f = { key = "value" } # non-recommended -``` - -```python -spam(ham[1], {eggs = 2}) # recommended -spam( ham[ 1 ], { eggs = 2 } ) # non-recommended -``` - -- Between a trailing comma and a following close parenthesis. - -```python -foo = [0,] # recommended -bar = [0, ] # non-recommended -``` - -- Immediately before the open parenthesis that starts the argument list of a function call. - -```python -print(1) # recommended -print (1) # non-recommended -``` - -- Immediately before the open parenthesis that starts indexing or slicing. - -```python -dct = {key = "value"} -lst = [1, 2, 3] - -a = dct['key'] # recommended -b = dct ['key'] # non-recommended - -c = lst[0] # recommended -d = lst [1] # non-recommended -``` - -- More than one space around an assignment `=` (or other) operator to align it with another. - -```python -# recommended: -x = 1 -y = 2 -long_variable = 3 -``` - -```python -# non-recommended: -x = 1 -y = 2 -long_variable = 3 -``` - -- Always surround these binary operators with a single space on either side: assignment (`=`), augmented assignment (`+=`, `-=`, etc.), comparisons (`==`, `<`, `>`, `!=`, `<=`, `>=`, `in`, `not in`, `is`, `is not`), booleans (`and`, `or`, `not`). - -```python -# recommended: -i = i + 1 -submitted += 1 -x = x * 2 - 1 -hypot2 = x * x + y * y -c = (a + b) * (a - b) -``` - -```python -# non-recommended: -i = i+1 -submitted+=1 -x = x*2 - 1 -hypot2 = x*x + y*y -c = (a+b) * (a-b) -``` - -- Break one blank line between different statements e.g., import, schema and expression statements. 
- -```python -import math -import net - -schema Person: - name: str - -person = Person { - name = "Alice" -} -``` - -- Compound statements (multiple statements on the same line) are generally discouraged - -```python -# recommended: -if foo == 'blah': - do_blah_thing() -do_one() -do_two() -do_three() -``` - -```python -# non-recommended: -if foo == 'blah': do_blah_thing() -do_one(); do_two(); do_three() -``` - -## Naming Conventions - -### Naming Styles - -The following naming styles are commonly distinguished: - -- `b` (single lowercase letter) -- `B` (single uppercase letter) -- `lowercase` -- `lower_case_with_underscores` -- `UPPERCASE` -- `UPPER_CASE_WITH_UNDERSCORES` -- `CapitalizedWords` (capitalize all letters of the acronym in ``CapitalizedWords`` e.g., `HTTPServer`.) -- `mixedCase` (differs from `CapitalizedWords` by initial lowercase character) -- `Capitalized_Words_With_Underscores` (ugly and non-recommended) - -### Names to Avoid - -Never use the characters 'l' (lowercase letter el), 'O' (uppercase letter oh), or 'I' (uppercase letter eye) as single-character variable names. - -### Package and Module Names - -Package and module names should have short, all-lowercase names. - -### Schema Names - -Schema names should normally use the `CapWords` convention. - -### Constants - -Constants are usually defined on a module level and written in all capital letters with underscores separating words such as `MAX_OVERFLOW` and `TOTAL`. - -## Import - -- Imports should usually be on separate lines. -- Imports are always put at the top of the file, just after any module comments and docstrings, and before module globals and constants. -- Imports should be grouped in the following order and we should put a blank line between each group of imports. - 1. Standard library imports. - 2. Related third party plugin imports. - 3. Local application/library specific imports. -- Use an alias when we import a package name with a relatively long path. -- Leave only one space between the Import keyword and the package name. - -```python -import net # recommended -import math # non-recommended - -import ..pkg.internal_pkg as alias_pkg # recommended -``` - -## Comments - -- Comments should be complete sentences. The first word should be capitalized unless it is an identifier that begins with a lower-case letter (never alter the case of identifiers!). -- Block comments generally consist of one or more paragraphs built out of complete sentences, with each sentence ending in a period. -- Use two spaces after a sentence-ending period in multi-sentence comments, except after the final sentence. - -### Block Comments - -Block comments generally apply to some (or all) code that follows them, and are indented to the same level as that code. Each line of a block comment starts with a `#` and **a single space**(unless it is indented text inside the comment). - -Paragraphs inside a block comment are separated by a line containing a single `#`. - -```python -# This is a block comment -a = 1 -``` - -### Inline Comments - -Use inline comments sparingly. - -An inline comment is a comment on the same line as a statement. Inline comments should be separated by **at least two spaces** from the statement. They should start with a `#` and **a single space**. - -```python -a = 1 # This is an inline comment -``` - -### Documentation Strings - -Write docstrings for all public schema and schema attributes. 
- -```python -schema Person: - """ - Person schema doc string - """ - - name: str = "Alice" - """ - Person schema attribute name doc string - """ -``` - -## String - -- Single-quoted strings and double-quoted strings are the same in KCL. -- Use double-quoted string with lowercase prefix -- For triple-quoted strings, always use double quote characters to be consistent with the docstring convention. -- When a string contains single or double quote characters, use the other one to avoid backslashes in the string. - -```python -strA = b"123" # recommended -strB = B'123' # non-recommended - -strC = "'123'" # recommended -strD = "\"123\"" # non-recommended -``` - -## Number - -- Use lowercase for the prefix of non-decimal numbers, and use uppercase for the number itself. - -```python -foo = 0xAB # recommended -bar = 0Xab # non-recommended -``` - -## Operators - -### Binary Operators - -- Leave only one space before and after the assignment `=`. -- Leave only one space before and after the binary operator in the expression. - -```python -a = 1 # recommended -b=2 # non-recommended -c= 3 # non-recommended -d =4 # non-recommended - -_value = (1 + 2 * 3) # recommended -_value = (1+2*3) # non-recommended -``` - -### Unary Operators - -- There is only no space after unary operators e.g., `~`, `+` and `-`. - -```python -_value = 1 + -2 * ~3 # recommended -_value = 1+ - 2 * ~ 3 # non-recommended -``` - -- There is no space after `**` and `*` in the dict/list deduction expressions and argument expressions. - -```python -_list = [1, 2, 3] -_list = [*_list, [4, 5 ,6]] # recommended -_list = [* _list, [4, 5 ,6]] # non-recommended - -_dict = {**{k = "v"}, **{k = "v"}} # recommended -_dict = {** {k = "v"}, ** {k = "v"}} # non-recommended -``` - -- Use `is not` operator rather than `not ... is`. - -```python -# recommended: -if foo is not None: - a = 1 -``` - -```python -# non-recommended: -if not foo is None: - a = 1 -``` - -## Dict - -- There is no space before the colon `:` at the instantiation of KCL dict and schema config, and a space after the colon `:`. - -```python -d1 = {labels: {k1 = "v1"}} # recommended -d2 = {labels : {k1 = "v1"}} # non-recommended -d3 = {labels :{k1 = "v1"}} # non-recommended -``` - -- Always surround the override attribute operator `=` and the insert attribute operator `+=` with a single space on either sid. - -```python -d1 = {key = "value"} # recommended -d2 = {key= "value"} # non-recommended -d3 = {key ="value"} # non-recommended -``` - -```python -d1 = {key += [0, 1, 2]} # recommended -d2 = {key+= [0, 1, 2]} # non-recommended -d3 = {key +=[0, 1, 2]} # non-recommended -``` - -- Remove all commas at the end of the line in the KCL multiline dict because the end commas of each line are optional. - -```python -d = { - key1 = "value1" - key2 = "value2" - key3 = "value3" - key4 = "value4" -} -``` - -## List - -- Keep only **one space** after the comma `,` separating elements in the list - -```python -a = [1, 2, 3] # recommended -b = [1,2,3] # non-recommended -``` - -- Keep only **one space** before and after the comprehension expression token `for` and `in` in the dict and list. - -```python -a = [i for i in range(10)] # recommended -b = [i for i in range(10)] # non-recommended -``` - -## Slice - -- Keep the same number of spaces before and after the colon `:` of the list slice. 
- -```python -l = [1, 2, 3] -a = l[0:2] # recommended -b = l[0 : 2] # non-recommended -c = l[0: 2] # non-recommended - -d = l[0 + 0 : 1 + 1] # recommended -d = l[0 + 0:1 + 1] # non-recommended -``` - -## Schema - -- Leave only one space before and after the schema attribute assignment `=`. -- Always add a doc string to a schema, which is a good programming habit. - -```python -schema Person: - """ - Schema doc string - """ - name: str = "Alice" # recommended - age : int=12 # non-recommended - -person = Person {} -``` - -- Keep **no spaces** around the schema inheritance operator `()` - -```python -schema Base: - name: str - -schema Person(Base): # recommended - age: int - -schema Schema ( Base ): # non-recommended - age: int -``` - -- Keep **only one space** between the brackets and the schema name of the config at schema instantiation. - -```python -schema Base: - name: str - -schema Person(Base): - age: int - -personA = Person{} # non-recommended -personB = Person {} # recommended -``` - -- Keep **only one space** between the **mixin** keyword and the following `[]` operator - -```python -schema NameMixin: - name: str = "name" - -schema Person: - mixin [NameMixin] # non-recommended - age: int - -schema Parent: - mixin [NameMixin] # recommended - age: int -``` - -### Attribute Annotations - -- Annotations for schema attributes should have a single space after the colon `:` and no space before the colon `:`. - -```python -# recommended: -schema Person: - name: str # No space before the colon `:` - age: int = 18 # Spaces around assignment`=` -``` - -```python -# non-recommended: -schema Person: - codeA:int # No space after the colon `:` - codeB : int # Space before the colon `:` - name: str="Alice" # No spaces around assignment`=` -``` - -- There are no spaces around the colon `:` in the dict type annotation. - -```python -schema Person: - labels: {str:str} # recommended - keyValues: {str : str} # non-recommended -``` - -### Arguments - -- There are no spaces around the assignment `=` in the function/schema/decorator keyword arguments (kwargs). - -```python -schema Person[nameVar]: - # Decorator kwargs - @deprecated(strict=False) # recommended - name: str = nameVar - - @deprecated(strict = False) # non-recommended - age: int - -# Schema kwargs -personA = Person(nameVar="Alice") {} # recommended -personB = Person(nameVar = "Bob") {} # non-recommended - -# Function kwargs -print("", end='') # recommended -print("", end = '') # non-recommended -``` - -## Keywords - -- Only one space is usually reserved around the keyword, such as `schema`, `mixin`, `final`, `is` and `not`, etc. - -```python -schema NameMixin: - check: - name not None - -schema Person: - """ - Person schema definition - """ - mixin [NameMixin] - - final name: str = "Alice" - age: int - -person = Person { - age = 18 -} -``` - -## Function - -- There are no spaces around the function/package select operator `.` -- There are no spaces between the function name and the parentheses `()`. - -```python -import math - -print(math.log(10)) # recommended -print( math . log (10)) # non-recommended -``` - -## Other Recommendations - -- All commas `,` semicolons `;`, colons `:` has no spaces before them. 
-
-```python
-if True:
-    a = 1;b = 2 # non-recommended
-    c = 3; d = 4 # recommended
-```
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/datatypes.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/datatypes.md
deleted file mode 100644
index 9c5f5ec2..00000000
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/datatypes.md
+++ /dev/null
@@ -1,430 +0,0 @@
----
-title: "Data Types"
-linkTitle: "Data Types"
-type: "docs"
-weight: 2
-description: Data Types
----
-## Syntax
-
-### Bool
-
-Boolean values are the two constant objects `False` and `True`. They are used to represent truth values (although other values can also be considered false or true). The built-in function bool() can be used to convert any value to a Boolean, if the value can be interpreted as a truth value.
-
-### Int
-
-Int, or integer, is an arbitrarily sized integer, positive or negative, without decimals, of 64 binary digits precision (-9,223,372,036,854,775,808~9,223,372,036,854,775,807). Int is created by int literals or as the result of built-in functions and operators. Unadorned integer literals (including `hex`, `octal` and `binary` numbers) yield integers. The constructor int() can be used to produce an int from a given value.
-
-In addition, integer literals may have an `SI` or `IEC` multiplier.
-
-+ `SI`: General integer or fixed-point number form: `P`, `T`, `G`, `M`, `K`, `k`, `m`, `u`, `n`.
-+ `IEC`: Corresponding power of 2: `Pi`, `Ti`, `Gi`, `Mi`, `Ki`.
-
-```python
-a = 1 # positive integer: 1
-b = -1 # negative integer: -1
-c = 0x10 # hexadecimal literal: 16
-d = 0o10 # octal literal: 8, or the form `010`
-e = 0b10 # binary literal: 2
-f = 10Ki # integer literal with IEC multiplier: 10240
-g = 1M # integer literal with SI multiplier: 1000000
-h = int("10") # int constructor: 10
-i = int("10Ki") # int constructor with multiplier: 10240
-```
-
-Notes:
-
-+ Report an error if unable to represent an integer value precisely.
-
-### Float
-
-Float, or floating-point, is an approximation to real numbers, positive or negative, containing one or more decimals, stored as 64-bit IEEE 754 values. The constructor float() can be used to produce a float from a given value.
-
-```python
-a = 1.10
-b = 1.0
-c = -35.59
-d = 32.3e+18
-f = -90.
-g = -32.54e100
-h = 70.2E-12
-i = float("112") # float constructor
-```
-
-Notes:
-
-+ Report an error if unable to represent a floating-point value due to overflow.
-+ Report a warning if unable to represent a floating-point value due to underflow. Round to the nearest representable value if unable to represent a floating-point value due to limits on precision. These requirements apply to the result of any expression except for built-in functions for which an unusual loss of precision must be explicitly documented.
-
-#### None
-
-In KCL, `None` indicates that the value of an object is empty, which is similar to `nil` in Go or `null` in Java, and corresponds to `null` in YAML and JSON.
-
-```python
-a = None
-b = [1, 2, None]
-c = {"key1" = "value1", "key2" = None}
-```
-
-Please note that `None` cannot participate in the four arithmetic operations, but it can participate in logical and comparison operations.
- -```python -a = 1 + None # error -b = int(None) # error -c = not None # True -d = None == None # True -e = None or 1 # 1 -f = str(None) # None -``` - -#### Undefined - -`Undefined` is similar to None, but its semantics is that a variable is not assigned any value and will not be output to YAML or JSON. - -```python -a = Undefined -b = [1, 2, Undefined] -c = {"key1" = "value1", "key2" = Undefined} -``` - -Please note that `Undefined` cannot participate in the four arithmetic operations, but it can participate logical operators and comparison operators to perform calculations. - -```python -a = 1 + Undefined # error -b = int(Undefined) # error -c = not Undefined # True -d = Undefined == Undefined # True -e = Undefined or 1 # 1 -f = str(Undefined) # Undefined -``` - -### Common Numeric Operations - -Int and Float support the following operations (see built-in proposal for built-in details): - -+ `x + y`: sum of x and y. -+ `x - y`: difference of x and y. -+ `x * y`: product of x and y. -+ `x / y`: quotient of x and y. -+ `x // y`: floored quotient of x and y. -+ `x % y`: remainder of x / y. -+ `x ** y`: x to the power y. -+ `-x`: x negated. -+ `+x`: x unchanged. -+ `~x`: x bitwise negation. -+ `abs(x)`: absolute value or magnitude of x. -+ `int(x)`: x converted to integer. -+ `float(x)`: x converted to floating point. - -KCL supports mixed arithmetic: when a binary arithmetic operator has operands of different numeric types, the operand with the "narrower" type is widened to that of the other, where integer is narrower than floating-point. - -### String - -Strings are immutable sequences of Unicode characters. String literals are written in a variety of ways: - -```python -'allows embedded "double" quotes' # Single quotes -"allows embedded 'single' quotes" # Double quotes -'''Three single quotes''', """Three double quotes""" # Triple quoted -``` - -Triple quoted strings may span multiple lines. - -Indexing a string produces strings of length 1, for a non-empty string s, `s[0] == s[0:1]`. - -```python -a = "Hello, World!" -b = a[2:5] # "llo" -c = a[-5:-2] # "orl" -d = a[::-1] # "'!dlroW ,olleH'" -``` - -+ `str(x=None) -> str` - -Return a string. If *x* is not provided, raise a runtime error. - -```python -x = str(3.5) # "3.5" -``` - -#### Members - -Built-in function and members of a string - -+ `str#len() -> int` - Return the number of characters in the string. -+ `capitalize() -> str` - Return a copy of the string with its first character (if any) capitalized and the rest lowercased. -+ `count(sub: str, start: int = 0, end: int = -1) -> int` - Returns the number of (non-overlapping) occurrences of substring sub in string, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `endswith(suffix: str, start: int = 0, end: int = -1) -> bool` - Returns `True` if the string ends with the specified suffix, otherwise return `False`, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `find(sub: str, start: int = 0, end: int = -1) -> int` - Returns the lowest index where substring sub is found, or -1 if no such index exists, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `format(*args, **kwargs) -> str` - Perform string interpolation. Format strings contain replacement fields surrounded by curly braces {}. Anything that is not contained in braces is considered literal text, which is copied unchanged to the output. 
If you need to include a bracket character in the literal text, it can be escaped by doubling: A replacement field can be either a name, a number or empty. Values are converted to strings using the str function. -+ `index(sub: str, start: int = 0, end: int = -1) -> int` - Returns the first index where sub is found, or raises an error if no such index exists, optionally restricting to `[start:end]` start being inclusive and end being exclusive. -+ `isalnum() -> bool` - Returns True if all characters in the string are alphanumeric (`[a-zA-Z0-9]`) and there is at least one character, False otherwise. -+ `isalpha() -> bool` - Returns True if all characters in the string are alphabetic (`[a-zA-Z]`) and there is at least one character. -+ `isdigit() -> bool` - Returns True if all characters in the string are digits (`[0-9]`) and there is at least one character. -+ `islower() -> bool` - Returns True if all cased characters in the string are lowercase and there is at least one character. -+ `isspace() -> bool` - Returns True if all characters are white space characters and the string contains at least one character. -+ `istitle() -> bool` - Returns True if the string is in title case and it contains at least one character. This means that every uppercase character must follow an uncased one (e.g., whitespace) and every lowercase character must follow a cased one (e.g., uppercase or lowercase). -+ `isupper() -> bool` - Returns True if all cased characters in the string are uppercase and there is at least one character. -+ `join(iterable: list) -> str` - Return a string which is the concatenation of the strings in iterable. A TypeError will be raised if there are any non-string values in iterable. The separator between elements is the string providing this method. Example: - - ```python - >>> "|".join(["a", "b", "c"]) - "a|b|c" - ``` -+ `lower() -> str` - Returns a copy of the string with all the cased characters converted to lowercase. -+ `lstrip(chars: str) -> str` - Return a copy of the string with leading characters removed. The chars argument is a string specifying the set of characters to be removed. If omitted or None, the chars argument defaults to removing whitespace. The chars argument is not a prefix; rather, all combinations of its values are stripped: - - ```python - >>> ' spacious '.lstrip() - 'spacious ' - >>> 'www.example.com'.lstrip('cmowz.') - 'example.com' - ``` -+ `replace(old: str, new: str, count: int) -> str` - Return a copy of the string with all occurrences of substring old replaced by new. If the optional argument count is given, only the first count occurrences are replaced. -+ `rfind(sub: str, start: int = 0, end: int = -1) -> int` - Return the highest index in the string where substring sub is found, such that sub is contained within s[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. -+ `rindex(sub: str, start: int = 0, end: int = -1) -> int` - Returns the last index where sub is found, or raises an ValueError if no such index exists, optionally restricting to `[start:end]`, start being inclusive and end being exclusive. -+ `rsplit(sep: str, maxsplit: int = -1) -> list` - Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done, the rightmost ones. If sep is not specified or None, any whitespace string is a separator. Except for splitting from the right, rsplit() behaves like split() which is described in detail below. 
-+ `rstrip(chars: str) -> str` - Return a copy of the string with trailing characters removed. The chars argument is a string specifying the set of characters to be removed. If omitted or None, the chars argument defaults to removing whitespace. The chars argument is not a suffix; rather, all combinations of its values are stripped: - - ```python - >>> ' spacious '.rstrip() - ' spacious' - >>> 'mississippi'.rstrip('ipz') - 'mississ' - ``` -+ `split(sep: str, maxsplit: int) -> list` - Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done (thus, the list will have at most maxsplit+1 elements). If maxsplit is not specified or -1, then there is no limit on the number of splits (all possible splits are made). - - If sep is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings (for example, `'1,,2'.split(',')` returns `['1', '', '2']`). The sep argument may consist of multiple characters (for example, `'1<>2<>3'.split('<>')` returns `['1', '2', '3']`). Splitting an empty string with a specified separator returns `['']`. - - For example: - - ```python - >>> '1,2,3'.split(',') - ['1', '2', '3'] - >>> '1,2,3'.split(',', maxsplit=1) - ['1', '2,3'] - >>> '1,2,,3,'.split(',') - ['1', '2', '', '3', ''] - ``` - - If sep is not specified or is None, a different splitting algorithm is applied: runs of consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Consequently, splitting an empty string or a string consisting of just whitespace with a `None` separator returns `[]`. - - For example: - - ```python - >>> '1 2 3'.split() - ['1', '2', '3'] - >>> '1 2 3'.split(maxsplit=1) - ['1', '2 3'] - >>> ' 1 2 3 '.split() - ['1', '2', '3'] - ``` -+ `splitlines(keepends: str) -> list` - Return a list of the lines in the string, breaking at line boundaries('\n', '\r\n', '\r'). Line breaks are not included in the resulting list unless keepends is given and true. - - This method splits on the following line boundaries. In particular, the boundaries are a superset of universal newlines. - - For example: - - ```python - >>> 'ab c\n\nde fg\rkl\r\n'.splitlines() - ['ab c', '', 'de fg', 'kl'] - >>> 'ab c\n\nde fg\rkl\r\n'.splitlines(keepends=True) - ['ab c\n', '\n', 'de fg\r', 'kl\r\n'] - ``` - - Unlike `split()` when a delimiter string sep is given, this method returns an empty list for the empty string, and a terminal line break does not result in an extra line: - - ```python - >>> "".splitlines() - [] - >>> "One line\n".splitlines() - ['One line'] - ``` - - For comparison, `split('\n')` gives: - - ```python - >>> ''.split('\n') - [''] - >>> 'Two lines\n'.split('\n') - ['Two lines', ''] - ``` -+ `startswith(prefix: str, start: int = 0, end: int = -1) -> bool` - Return `True` if string starts with the prefix, otherwise return False. prefix can also be a list of prefixes to look for. With optional start, test string beginning at that position. With optional end, stop comparing string at that position. -+ `strip(chars: str) -> str` - Return a copy of the string with the leading and trailing characters removed. The chars argument is a string specifying the set of characters to be removed. If omitted or None, the chars argument defaults to removing whitespace. 
The chars argument is not a prefix or suffix; rather, all combinations of its values are stripped:
-
-  ```python
-  >>> ' spacious '.strip()
-  'spacious'
-  >>> 'www.example.com'.strip('cmowz.')
-  'example'
-  ```
-
-  The outermost leading and trailing chars argument values are stripped from the string. Characters are removed from the leading end until reaching a string character that is not contained in the set of characters in chars. A similar action takes place on the trailing end. For example:
-
-  ```python
-  >>> comment_string = '#....... Section 3.2.1 Issue #32 .......'
-  >>> comment_string.strip('.#! ')
-  'Section 3.2.1 Issue #32'
-  ```
-+ `title() -> str`
-  Return a titlecased version of the string where words start with an uppercase character and the remaining characters are lowercase.
-
-  For example:
-
-  ```python
-  >>> 'Hello world'.title()
-  'Hello World'
-  ```
-
-  The algorithm uses a simple language-independent definition of a word as groups of consecutive letters. The definition works in many contexts but it means that apostrophes in contractions and possessives form word boundaries, which may not be the desired result:
-
-  ```python
-  >>> "they're bill's friends from the UK".title()
-  "They'Re Bill'S Friends From The Uk"
-  ```
-+ `upper() -> str`
-  Return a copy of the string with all the cased characters converted to uppercase. Note that `s.upper().isupper()` might be `False` if s contains uncased characters or if the Unicode category of the resulting character(s) is not “Lu” (Letter, uppercase), but e.g., “Lt” (Letter, titlecase).
-
-### List
-
-Lists are immutable sequences, typically used to store collections of homogeneous items (where the precise degree of similarity will vary by application).
-
-Lists may be constructed in several ways:
-
-+ Using a pair of square brackets to denote the empty list: `[]`
-+ Using square brackets, separating items with commas: `[a]`, `[a, b, c]`
-+ Using a list comprehension: `[x for x in iterable]`
-+ Using the type constructor: list() or list(iterable)
-
-The constructor builds a list whose items are the same and in the same order as iterable’s items. Iterable may be either a sequence, a container that supports iteration, or an iterator object. If iterable is already a list, a copy is made and returned, similar to `iterable[:]`. For example, `list('abc')` returns `['a', 'b', 'c']` and `list([1, 2, 3])` returns `[1, 2, 3]`. If no argument is given, the constructor creates a new empty list `[]`.
-
-Lists implement all of the common sequence operations.
-
-#### Members
-
-+ `len()`
-  Return the number of items in the list.
-
-### Common Sequence Operations
-
-The operations in the following table are supported by List and Dict.
-
-This table lists the sequence operations sorted in ascending priority. In the table, s and t are sequences of the same type, n, i, j and k are integers and x is an arbitrary object that meets any type and value restrictions imposed by s.
-
-The `in` and `not in` operations have the same priorities as the comparison operations. The `+` (concatenation) and `*` (repetition) operations have the same priority as the corresponding numeric operations.
-
-| Operation    | Result                                              | Notes |
-| ------------ | --------------------------------------------------- | ----- |
-| `x in s`     | `True` if an item of s is equal to x, else `False`  | #1    |
-| `x not in s` | `False` if an item of s is equal to x, else `True`  | #1    |
-| `s + t`      | the concatenation of s and t                        | #5    |
-| `s[i]`       | ith item of s, origin 0                             | #2    |
-| `s[i:j]`     | slice of s from i to j                              | #2 #3 |
-| `s[i:j:k]`   | slice of s from i to j with step k                  | #2 #4 |
-| `min(s)`     | smallest item of s                                  |       |
-| `max(s)`     | largest item of s                                   |       |
-
-Notes:
-
-+ 1. While the in and not in operations are used only for simple containment testing in the general case, some specialized sequences (str) also use them for subsequence testing:
-
-```python
->>> "gg" in "eggs"
-True
-```
-
-+ 2. If i or j is negative, the index is relative to the end of sequence s: `s.len() + i` or `s.len() + j` is substituted. But note that -0 is still 0.
-+ 3. The slice of s from i to j is defined as the sequence of items with index k such that `i <= k < j`. If i or j is greater than `s.len()`, use `s.len()`. If i is omitted or None, use 0. If j is omitted or None, use `s.len()`. If i is greater than or equal to j, the slice is empty.
-+ 4. The slice of s from i to j with step k is defined as the sequence of items with index `x = i + n*k` such that `0 <= n < (j-i)/k`. In other words, the indices are `i`, `i+k`, `i+2*k`, `i+3*k` and so on, stopping when j is reached (but never including j). When k is positive, i and j are reduced to s.len() if they are greater. When k is negative, i and j are reduced to s.len() - 1 if they are greater. If i or j are omitted or None, they become “end” values (which end depends on the sign of k). Note, k cannot be zero. If k is None, it is treated like 1.
-+ 5. Concatenating immutable sequences always results in a new object. This means that building up a sequence by repeated concatenation will have a quadratic runtime cost in the total sequence length. To get a linear runtime cost, you must switch to the alternative below:
-
-  + if concatenating str objects, you can build a list and use `str.join()` at the end.
-+ 6. `index` raises `ValueError` when x is not found in s. Not all implementations support passing the additional arguments i and j. These arguments allow efficient searching of subsections of the sequence. Passing the extra arguments is roughly equivalent to using `s[i:j].index(x)`, only without copying any data and with the returned index being relative to the start of the sequence rather than the start of the slice.
-
-### Dict
-
-Dict is an immutable mapping object that maps hashable values to arbitrary objects. A dictionary’s keys are almost arbitrary values. Values that are not hashable, that is, values containing lists or dictionaries, may not be used as keys. Numeric types used for keys obey the normal rules for numeric comparison: if two numbers compare equal (such as 1 and 1.0) then they can be used interchangeably to index the same dictionary entry. (Note however, that since computers store floating-point numbers as approximations, it is usually unwise to use them as dictionary keys.) Dict is ordered. The order of the keys is the order of their declaration.
-
-Dictionaries can be created by placing a comma-separated list of key: value pairs within braces, for example: `{'jack': 4098, 'sjoerd': 4127}` or `{4098: 'jack', 4127: 'sjoerd'}`, by the dict constructor, or by a list/dict comprehension.
-
-+ `dict(obj)`
-
-Return a new dictionary initialized from an optional positional argument and a possibly empty set of keyword arguments. If no positional argument is given, an empty dictionary is created. If a positional argument is given and it is a mapping object, a dictionary is created with the same key-value pairs as the mapping object. Otherwise, the positional argument must be an iterable object. Each item in the iterable must itself be an iterable with exactly two objects. The first object of each item becomes a key in the new dictionary, and the second object the corresponding value. If a key occurs more than once, the last value for that key becomes the corresponding value in the new dictionary. If keyword arguments are given, the keyword arguments and their values are added to the dictionary created from the positional argument. If a key being added is already present, the value from the keyword argument replaces the value from the positional argument. To illustrate, the following examples all return a dictionary equal to `{"one": 1, "two": 2, "three": 3}`:
-
-```python
->>> a = {'two': 1, 'one': 2, 'three': 3}
->>> b = {'one': 1, 'two': 2, 'three': 3}
->>> c = {'three': 3, 'one': 1, 'two': 2}
->>> a == b == c
-True
-```
-
-Providing keyword arguments as in the first example only works for keys that are valid KCL identifiers. Otherwise, any valid keys can be used.
-
-In the dict comprehension, key/value pairs yielded by the generator expression are set in the dictionary in the order yielded: the first occurrence of the key determines its insertion order, and the last determines the value associated to it.
-
-```python
->>> {str(i): 2 * i for i in range(3)}
-{"0": 0, "1": 2, "2": 4}
-
->>> a = {"one": 1, "two": 2, "three": 3}
->>> b = {k: v for k, v in a if v >= 2}
-{"two": 2, "three": 3}
-```
-
-#### Operations & Members
-
-These are the operations that dictionaries support.
-
-+ `list(d)`
-  Return a list of all the keys used in the dictionary d.
-+ `len()`
-  Return the number of items in the dictionary d.
-+ `d[key]`
-  Return the item of d with key. Return Undefined if key is not in the map.
-+ `key in d`
-  Return True if d has the key, else False.
-+ `key not in d`
-  Equivalent to not key in d.
-+ `d.key`
-  Return the item of d with key. Return Undefined if key is not in the map.
-
-Dictionaries compare equal if and only if they have the same (key, value) pairs (keys' ordering matters). Order comparisons (‘<’, ‘<=’, ‘>=’, ‘>’) raise TypeError.
-
-```python
->>> d = {"one": 1, "two": 2, "three": 3, "four": 4}
->>> d
-{'one': 1, 'two': 2, 'three': 3, 'four': 4}
->>> list(d)
-['one', 'two', 'three', 'four']
-```
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/error.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/error.md
deleted file mode 100644
index 84f9044a..00000000
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/error.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: "Errors"
-linkTitle: "Errors"
-type: "docs"
-weight: 2
-description: Errors
----
-When errors happen, developers should be able to detect the error and abort
-execution. Thus, KCL introduces the `assert` syntax.
-
-As described in the previous topic on the `schema` syntax, errors can also be raised when a
-schema is violated.
-
-## Syntax
-
-The syntax of the `assert` statement is the following.
-
-```
-assert_stmt: 'assert' test [',' test]
-```
-
-In the basic form, an `assert` statement evaluates an expression.
If the -expression is evaluated to `False`, the assertion is failed, and an error -should be reported. - -In the extended form, an error message can be provided. The error message is -another expression. It is only evaluated when the expression to be evaluated -is evaluated to `False`. The evaluation result of the error message is printed -when reporting the error. - -The following is an example: - -```python -a = 1 -b = 3 -# a != b evaluates to True, therefore no error should happen. -assert a != b -# a == b is False, in the reported error message, the message "SOS" should be printed. -assert a == b, "SOS" -``` - -## The Implementation - -When an error happens, no matter it is caused by the `assert` or the `schema` syntax, -the virtual machine should exit with an exit code greater than `0`. - -The virtual machine may choose to dump the back trace information, and it is strongly -recommended to implement it. - -In practice, KCLVM can dump back trace by default, and an argument can be introduced -to disable it. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/expressions.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/expressions.md deleted file mode 100644 index 58999cb4..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/expressions.md +++ /dev/null @@ -1,842 +0,0 @@ ---- -title: "Expressions" -linkTitle: "Expressions" -type: "docs" -weight: 2 -description: Expressions ---- -## Syntax - -In KCL, an expression specifies the computation of a value. - -The syntax is the following: - -``` -expression: test ("," test)* -test: if_expr | primary_expr | unary_expr | binary_expr -``` - -KCL expressions consist of `if` expression, `primary` expression, `unary` expression, and `binary` expression. - -### Primary Expressions - -Primary expressions are the operands for unary and binary expressions. - -Operands are self-delimiting. An **operand** may be followed by any number of selector dot, a function call, or slice suffixes, to form a primary expression. The grammar uses `expression`, where a multiple-component expression is allowed, and `test` where it accepts an expression of only a single component. - -Syntax: - -``` -primary_expr: operand | primary_expr select_suffix | primary_expr call_suffix | primary_expr subscript_suffix -``` - -### Operands - -Operand denotes the elementary value in an expression. An operand may be an identifier, a literal, or a parenthesized expression. - -Syntax: - -``` -operand: operand_name | number | string | "True" | "False" | "None" | list_expr | list_comp | dict_expr | dict_comp | "(" expression ")" -operand_name: identifier | qualified_identifier -``` - -### Identifiers - -In KCL, an identifier is a name, may with selectors, that identifies a value. - -Syntax: - -``` -identifier: NAME -``` - -Examples: - -```python -x -a -_b -``` - -Use the `$` character prefix to define keyword identifiers. - -```python -$if = 1 -$else = "s" -``` - -Please note: whether the non-keyword identifier is prefixed with `$` has the same effect. - -```python -_a = 1 -$_a = 2 # equal to `_a = 2` -``` - -To simplify the definition of the qualified identifier, such as 'pkg.type', we additionally define `qualified_identifier`: - -Syntax: - -``` -qualified_identifier: identifier "." identifier -``` - -Examples: - -```python -pkg.a -``` - -The package name in qualified_identifier must be imported. 
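-
-As a small hedged illustration (assuming the standard `math` module described later in the Modules spec), a qualified identifier can only be used after its package has been imported:
-
-```python
-import math
-
-# `math.pow` is a qualified identifier: package `math`, member `pow`.
-a = math.pow(2, 3) # 2 powers 3 is 8
-```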
- -### Basic Literals - -Basic literals supported in KCL are `int`, `float`, `string` and `bool` including `True` and `False`. Evaluation of basic literal yields a value of the given type with the given value. - -Syntax: - -``` -operand: number | string | "True" | "False" | "None" | "Undefined" -``` - -Examples: - -```python -1 -2.3 -"abc" -True -False -None -Undefined -``` - -See more details about **data type** spec. - -### Parenthesized Expressions - -An expression enclosed in parentheses yields the result of that expression. - -Syntax: - -``` -operand: '(' [expression] ')' -``` - -Examples: - -```python -x = (1 + 2) * (3 + 4) # 21 -``` - -### Dictionary Expressions - -A dictionary expression is a comma-separated immutable list of colon-separated key/value expression pairs, enclosed in curly brackets, and it yields a new dictionary object. An optional comma may follow the final pair. - -``` -dict_expr: '{' [entries [',']] '}' -entries: entry {',' entry} -entry: test ':' test | "**" primary_expr -``` - -Examples: - -```python -{} -{"one": 1} -{"one": 1, "two": 2} -``` - -The key and value expressions are evaluated in left-to-right order. Evaluation fails if the same key is used multiple times. - -Only hashable values may be used as the keys of a dictionary. This includes all built-in types except dictionaries, and lists. - -We can ignore the comma `,` at the end of the line for writing dict key-value pairs in multiple lines: - -```python -data = { - "key1" = "value1" # Ignore the comma ',' at the end of line - "key2" = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -We can ignore the key quotation marks when we writing simple literals on the key. - -```python -data = { - key1 = "value1" # Ignore the comma ',' at the end of line - key2 = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -In addition, the **config selector expressions** can be used to init a schema instance. - -```python -person = { - base.count = 2 - base.value = "value" - labels.key = "value" -} # {"base": {"count": 2, "value": "value"}, "labels": {"key": "value"}} -``` - -We can **merge** dict using the dict unpacking operator `**` like this: - -```python -_part1 = { - a = "b" -} - -_part2 = { - c = "d" -} - -a_dict = {**_part1, **_part2} # {"a: "b", "c": "d"} -``` - -We can use `if expressions` to dynamically add elements to the dict element, elements that meet the conditions are added to the dict, and elements that do not meet the conditions are ignored. - -```python -a = 1 # 1 -data = { - key1 = "value1" - if a == 1: key2 = "value2" - if a > 0: key3 = "value3" - if a < 0: key4 = "value4" -} # {"key1": "value1", "key2": "value2", "key3": "value3"} -``` - -```python -a = 1 # 1 -data1 = { - key1 = "value1" - if a == 1: - key2 = "value2" - elif a > 0: - key3 = "value3" - else: - key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -data2 = { - key1 = "value1" - if a == 1: key2 = "value2" - elif a > 0: key3 = "value3" - else: key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -``` - -### List Expressions - -A list expression is a comma-separated immutable list of element expressions, enclosed in square brackets, and it yields a new list. An optional comma may follow the last element expression. - -``` -list_expr: '[' [list_item [',']] ']' -list_item: test | "*" primary_expr -``` - -Element expressions are evaluated in left-to-right order. 
- -Examples: - -```python -[] # [], empty list -[1] # [1], a 1-element list -[1, 2, 3] # [1, 2, 3], a 3-element list -``` - -We can use `if expressions` to dynamically add elements to the list element, elements that meet the conditions are added to the list, and elements that do not meet the conditions are ignored. - -```python -a = 1 # 1 -data = [ - 1 - if a == 1: 2 - if a > 0: 3 - if a < 0: 4 -] # [1, 2, 3] -``` - -```python -a = 1 # 1 -data1 = [ - 1 - if a == 1: - 2 - elif a == 2: - 3 - else: - 3 -] # [1, 2] -data2 = [ - 1 - if a == 1: 2 - elif a == 2: 2 - else: 3 -] # [1, 2] -``` - -### Comprehensions - -A comprehension constructs a new list or dictionary value by looping over one or more iterables and evaluating a body expression that produces successive elements of the result. - -Syntax: - -``` -list_comp: '[' list_item comp_clause+ ']' . -dict_comp: '{' entry comp_clause+ '}' . - -comp_clause: 'for' loop_variables [","] 'in' test ['if' test] -loop_variables: primary_expr (',' primary_expr)* -``` - -A list comprehension consists of a single expression followed by one or more clauses, the first of which must be a `for` clause. Each `for` clause resembles a `for` statement, and specifies an iterable operand and a set of variables to be assigned by successive values of the iterable. An `if` cause resembles an `if` statement, and specifies a condition that must be met for the body expression to be evaluated. A sequence of `for` and `if` clauses acts like a nested sequence of `for` and `if` statements. - -Examples: - -```python -[x * x for x in range(5)] # [0, 1, 4, 9, 16] -[x * x for x in range(5) if x % 2 == 0] # [0, 4, 16] -[[x, y] for x in range(5) \ - if x % 2 == 0 \ - for y in range(5) \ - if y > x] # [[0, 1], [0, 2], [0, 3], [0, 4], [2, 3], [2, 4]] -``` - -Besides, we can use two variables in the list comprehension, the first variable denotes the list index and the second variable denotes the list item. - -```python -data = [1000, 2000, 3000] -# Single variable loop -dataLoop1 = [i * 2 for i in data] # [2000, 4000, 6000] -dataLoop2 = [i for i in data if i == 2000] # [2000] -dataLoop3 = [i if i > 2 else i + 1 for i in data] # [1000, 2000, 3000] -# Double variable loop -dataLoop4 = [i + v for i, v in data] # [1000, 2001, 3002] -dataLoop5 = [v for i, v in data if v == 2000] # [2000] -# Use `_` to ignore loop variables -dataLoop6 = [v if v > 2000 else v + i for i, v in data] # [1000, 2001, 3000] -dataLoop7 = [i for i, _ in data] # [0, 1, 2] -dataLoop8 = [v for _, v in data if v == 2000] # [2000] -``` - -A dict comprehension resembles a list comprehension, but its body is a pair of expressions, key: value, separated by a colon, and its result is a dictionary containing the key/value pairs for which the body expression was evaluated. Evaluation fails if the value of any key is un-hashable. - -Besides, we can use two variables in the dict comprehension, the first variable denotes the dict key and the second variable denotes the dict value of the key. 
- -```python -data = {"key1" = "value1", "key2" = "value2"} -# Single variable loop -dataKeys1 = {k: k for k in data} # {"key1": "key1", "key2": "key2"} -dataValues1 = {k: data[k] for k in data} # {"key1": "value1", "key2": "value2"} -# Double variable loop -dataKeys2 = {k: k for k, v in data} # {"key1": "key1", "key2": "key2"} -dataValues2 = {v: v for k, v in data} # {"value1": "value1", "value2": "value2"} -dataFilter = {k: v for k, v in data if k == "key1" and v == "value1"} # {"key1": "value1"} -# Use `_` to ignore loop variables -dataKeys3 = {k: k for k, _ in data} # {"key1": "key1", "key2": "key2"} -dataValues3 = {v: v for _, v in data} # {"value1": "value1", "value2": "value2"} -``` - -As with a `for` loop, the loop variables may exploit compound assignment: - -```python -[x * y + z for [x, y], z in [[[2, 3], 5], [["o", 2], "!"]] # [11, 'oo!'] -``` - -KCL does not accept an un-parenthesized list as the operand of a for clause: - -```python -[x * x for x in 1, 2, 3] # parse error: unexpected comma -``` - -Comprehensions defines a new lexical block, so assignments to loop variables have no effect on variables of the same name in an enclosing block: - -```python -x = 1 -_ = [x for x in [2]] # new variable x is local to the comprehension -print(x) # 1 -``` - -The operand of a comprehension's first clause (always a for) is resolved in the lexical block enclosing the comprehension. In the examples below, identifiers referring to the outer variable named x have been distinguished by subscript. - -```python -x0 = [1, 2, 3] -[x * x for x in x0] # [1, 4, 9] -[x * x for x in x0 if x % 2 == 0] # [4] -``` - -All subsequent for and if expressions are resolved within the comprehension's lexical block, as in this rather obscure example: - -```python -x0 = [[1, 2], [3, 4], [5, 6]] -[x * x for x in x0 for x in x if x % 2 == 0] # [4, 16, 36] -``` - -which would be more clearly rewritten as: - -```python -x = [[1, 2], [3, 4], [5, 6]] -[z * z for y in x for z in y if z % 2 == 0] # [4, 16, 36] -``` - -### Conditional Expressions - -A conditional expression has the form `a if cond else b`. It first evaluates the condition `cond`. If it's true, it evaluates `a` and yields its value; otherwise it yields the value of `b`. - -Syntax: - -``` -if_expr: test "if" test "else" test -``` - -Examples: - -```python -x = True if enabled else False # if enabled is -``` - -### Unary Expressions - -In KCL, supported unary operators are `+`, `-`, `~`, and `not`. - -Syntax: - -``` -unary_expr: ("+" | "-" | "~") primary_expr - | "not" test -``` - -Usage: - -``` -+ number unary positive (int, float) -- number unary negation (int, float) -~ number unary bitwise inversion (int) -not x logical negation (any type) -``` - -The `+` and `-` operators may be applied to any number (int or float) and return the number. -The `not` operator returns the negation of the truth value of its operand. - -Examples: - -```python -~1 # -2 -~-1 # 0 -~0 # -1 -not True # False -not 0 # True -``` - -### Binary Expressions - -In KCL, binary expressions consist of `comparisons`, `logical operations`, `arithmetic operations` and `membership tests`. - -Syntax: - -``` -binary_expr: test bin_op test -bin_op: 'or' - | 'and' - | '==' | '!=' | '<' | '>' | '<=' | '>=' - | 'in' | 'not' 'in' - | '|' - | '^' - | '&' - | '-' | '+' - | '*' | '%' | '/' | '//' - | '<<' | '>>' -``` - -#### Logical Operations - -The `or` and `and` operators yield the logical disjunction and conjunction of their arguments, which need not be Booleans. 
- -The expression `x or y` yields the value of `x` if its truth value is `True`, or the value of `y` otherwise. - -```python -False or False # False -False or True # True -True or True # True -1 or "hello" # 1 -``` - -Similarly, `x` and `y` yields the value of `x` if its truth value is `False`, or the value of `y` otherwise. - -```python -False and False # False -False and True # False -True and True # True -1 and "hello" # "hello" -``` - -These operators use "short circuit" evaluation, so the second expression is not evaluated if the value of the first expression has already determined the result, allowing constructions like these: - -```python -x and x[0] == 1 # x[0] is not evaluated if x is empty -len(x) == 0 or x[0] == "" -not x or not x[0] -``` - -#### Comparisons - -The `==` operator reports whether its operands are equal; the `!=` operator is its negation. - -The operators `<`, `>`, `<=`, and `>=` perform an ordered comparison of their operands. It is an error to apply these operators to operands of unequal type, unless one of the operands is an `int` and the other is a `float`. Of the built-in types, only the following support ordered comparison, using the ordering relation shown: - -``` -NoneType # None <= None -bool # False < True -int # mathematical -float # as defined by IEEE 754 -string # lexicographical -list # lexicographical -``` - -Comparison of floating-point values follows the IEEE 754 standard, which breaks several mathematical identities. For example, if `x` is a `NaN` value, the comparisons `x < y`, `x == y`, and `x > y` all yield false for all values of `y`. - -The remaining built-in types support only equality comparisons. Values of type `dict` and `schema` compare equal if their elements compare equal, and values of type function or `builtin_function_or_method` are equal only to themselves. - -``` -dict # equal contents -schema # equal exported-attributes -function # identity -builtin_function_or_method # identity -``` - -#### Arithmetic Operations - -The following table summarizes the binary arithmetic operations available for built-in types: - -``` -Arithmetic (int or float; result has type float unless both operands have type int) - number + number # addition - number - number # subtraction - number * number # multiplication - number / number # real division (result is always a float) - number // number # floored division - number % number # remainder of floored division - number ^ number # bitwise XOR - number << number # bitwise left shift - number >> number # bitwise right shift - -Concatenation - string + string - list + list - -Repetition (string/list) - int * sequence - sequence * int - -Union - int | int - list | list - dict | dict - schema | schema - schema | dict -basictype | basictype -``` - -The operands of the arithmetic operators `+`, `-`, `*`, `//`, and `%` must both be numbers (`int` or `float`) but need not have the same type. The type of the result has type `int` only if both operands have that type. The result of real division / always has type `float`. - -The `+` operator may be applied to non-numeric operands of the same type, such as two lists, or two strings, in which case it computes the concatenation of the two operands and yields a new value of the same type. 
- -```python -"Hello, " + "world" # "Hello, world" -[1, 2] + [3, 4] # [1, 2, 3, 4] -``` - -The `*` operator may be applied to an integer n and a value of type `string`, `list`, in which case it yields a new value of the same sequence type consisting of n repetitions of the original sequence. The order of the operands is immaterial. Negative values of n behave like zero. - -```python -'mur' * 2 # 'murmur' -3 * range(3) # [0, 1, 2, 0, 1, 2, 0, 1, 2] -``` - -The `&` operator requires two operands of the same type, such as `int`. For integers, it yields the bitwise intersection (AND) of its operands. - -The `|` operator likewise computes bitwise, unions basic types and unions collection and schema data, such as **list**, **dict** and **schema**. - -Computing bitwise examples: - -```python -0x12345678 | 0xFF # 0x123456FF -``` - -Unioning basic types examples: - -```python -schema x: - a: int | str # attribute a could be a int or string -``` - -A union type is used to define a schema attribute type. See more details in **schema** spec. -Supported types in a union type are `int`, `str`, `float`, `bool`, `list` and `dict`. - -Unioning collection and schema data: - -- Unioning List. Overwrite the list expression on the right side of the operator `|` to the list variable on the left side of the operator one by one according to the **index**. - -```python -_a = [1, 2, 3] -_b = [4, 5, 6, 7] -x = _a | _b # [4, 5, 6, 7] 4 -> 1; 5 -> 2; 6 -> 3; 7 -> None -``` - -Unioning to the specific index or all elements is still under discussion. - -- Unioning Dict. Union the dict expression on the right side of the operator `|` one by one to the dict variable on the left side of the operator according to the **key** - -```python -_a = {key1 = "value1"} -_b = {key1 = "overwrite", key2 = "value2"} -_c = _a | _b # {"key1": "overwrite", "key2": "value2"} -``` - -The union of collection and schema is a new one whose attributes are unioning b to a, preserving the order of the attributes of the operands, left before right. - -Unioning to the specific key or all keys is still under discussion. - -- Unioning Schema. - -The union operation for schema is similar to dict. - -Schema union could be done as: - -``` -schema Person: - firstName: str - lastName: str - -_a = Person { - firstName = "John" -} -_b = {lastName = "Doe"} -_a = _a | _b # {"firstName": "John", "lastName": "Doe"} -``` - -Unioning to a specific attribute is still under discussion. Unioning to all attributes is not applicable to schema instances. - -See **selector expression** in **expression** spec for more details. - -The `^` operator accepts operands of `int`. For integers, it yields the bitwise XOR (exclusive OR) of its operands. - -The `<<` and `>>` operators require operands of `int` type both. They shift the first operand to the left or right by the number of bits given by the second operand. It is a dynamic error if the second operand is negative. Implementations may impose a limit on the second operand of a left shift. - -```python -0x12345678 & 0xFF # 0x00000078 -0b01011101 ^ 0b110101101 # 0b111110000 -0b01011101 >> 2 # 0b010111 -0b01011101 << 2 # 0b0101110100 -``` - -#### Membership Tests - -Usage: - -``` - any in sequence (list, dict, schema, string) - any not in sequence -``` - -The `in` operator reports whether its first operand is a member of its second operand, which must be a list, dict, schema, or string. The `not in` operator is its negation. Both return a Boolean. 
- -The meaning of membership varies by the type of the second operand: the members of a list are its elements; the members of a dict are its keys; the members of a string are all its substrings. - -```python -1 in [1, 2, 3] # True - -d = {"one" = 1, "two" = 2} -"one" in d # True -"three" in d # False -1 in d # False -[] in d # False - -"nasty" in "dynasty" # True -"a" in "banana" # True -"f" not in "way" # True - -d = data {one = 1, two = 2} # data is a schema with attributes one and two -"one" in d # True -"three" in d # False -``` - -### Function Invocations - -KCL allows calling built-in functions and functions from built-in and system modules. Whether KCL allows defining new functions is under discussion. - -Syntax: - -``` -call_suffix: "(" [arguments [","]] ")" -arguments: argument ("," argument)* -argument: test | identifier "=" test | "*" test | "**" test -``` - -To call a function, the basic way is shown as the following code excerpt: - -```python -print("An argument") - -import math -# 2 powers 3 is 8. -a = math.pow(2, 3) -``` - -As you can see, arguments are separated with `,`. Arguments can only be passed in this way. KCL supports positional arguments and key-value arguments. - -Note that: - -- Some functions have parameters with default values. -- Some functions accept variadic arguments. - -When an argument is not supplied for a parameter without a default value, -an error will be reported. - -### Selector Expressions - -A selector expression selects the attribute or method of the value. - -#### Select attribute - -Syntax: - -``` -select_suffix: ["?"] "." identifier -``` - -KCL provides a wealth of ways to identify or filter attributes. - -x.y - -- schema: it denotes the attribute value of a schema `x` identified by `y` -- package: it denotes the identifier of a package `x` identified by `y` - -Examples: - -``` -schema Person: - name: str - age: int - -person = Person { - name = "Alice" - age = 18 -} -name = person.name # "Alice" -age = person.age # 18 -``` - -x?.y - -If the x if None/Undefined or empty(empty list or dict), just return None, otherwise behave as x.y. - -Examples - -``` -noneData = None -data?.name # None - -emptyDict = {} -emptyDict?.name # None - -emptyList = [] -emptyList?[0] # None -``` - -As a supplementary of the `selector` expression in KCL code, the KCL compiler needs to provide corresponding identifying and filtering features through the command line and api form. - -#### Select method - -Syntax: - -``` -select_suffix: "." identifier -``` - -A `identifier` identifies method belongs to the built-in types `string`, `list`, `dict`, and `schema`. - -- A selector expression fails if the value does not have an attribute of the specified name. -- A selector expression that selects a method typically appears within a call expression, as in these examples: - -Examples: - -```python -["able", "baker", "charlie"].index("baker") # 1 -"banana".count("a") # 3 -"banana".reverse() # error: string has no .reverse field or method -Person.instances() # all instances of schema Person -``` - -But when not called immediately, the selector expression evaluates to a bound method, that is, a method coupled to a specific receiver value. A bound method can be called like an ordinary function, without a receiver argument: - -``` -f = "banana".count -f # -f("a") # 3 -f("n") # 2 -``` - -### Index expressions - -An index expression `a[i]` yields the `i` th element of an indexable type such as a string or list. 
The index `i` must be an `int` value in the range `-n` ≤ `i` < `n`, where `n` is `len(a)`; any other index results in an error.
-
-Syntax:
-
-```
-subscript_suffix: "[" [test] "]"
-```
-
-A valid negative index `i` behaves like the non-negative index `n+i`, allowing for convenient indexing relative to the end of the sequence.
-
-```python
-"abc"[0] # "a"
-"abc"[1] # "b"
-"abc"[-1] # "c"
-
-["zero", "one", "two"][0] # "zero"
-["zero", "one", "two"][1] # "one"
-["zero", "one", "two"][-1] # "two"
-```
-
-An index expression `d[key]` may also be applied to a dictionary `d`, to obtain the value associated with the specified key. It returns `Undefined` if the dictionary contains no such key.
-
-An index expression appearing on the left side of an assignment causes the specified list or dictionary element to be updated:
-
-```python
-a = range(3) # a == [0, 1, 2]
-a[2] = 7 # a == [0, 1, 7]
-
-coins["suzie b"] = 100
-```
-
-It is a dynamic error to attempt to update an element of an immutable type, such as a list or string, or a frozen value of a mutable type.
-
-### Slice expressions
-
-A slice expression `a[start:stop:stride]` yields a new value containing a sub-sequence of `a`, which must be a string or a list.
-
-```
-subscript_suffix: "[" [test] [":" [test] [":" [test]]] "]"
-```
-
-Each of the `start`, `stop`, and `stride` operands is optional; if present, and not `None`, each must be an integer.
-The `stride` value defaults to 1. If the stride is not specified, the colon preceding it may be omitted too. It is an error to specify a stride of zero.
-
-Conceptually, these operands specify a sequence of values `i` starting at `start` and successively adding `stride` until `i` reaches or passes `stop`. The result consists of the concatenation of values of `a[i]` for which `i` is valid.
-
-The effective start and stop indices are computed from the three operands as follows. Let `n` be the length of the sequence.
-
-**If the stride is positive**: If the `start` operand was omitted, it defaults to -infinity. If the `end` operand was omitted, it defaults to +infinity. For either operand, if a negative value was supplied, `n` is added to it. The `start` and `end` values are then "clamped" to the nearest value in the range 0 to `n`, inclusive.
-
-**If the stride is negative**: If the `start` operand was omitted, it defaults to +infinity. If the `end` operand was omitted, it defaults to -infinity. For either operand, if a negative value was supplied, `n` is added to it. The `start` and `end` values are then "clamped" to the nearest value in the range -1 to `n`-1, inclusive.
-
-```python
-"abc"[1:] # "bc" (remove first element)
-"abc"[:-1] # "ab" (remove last element)
-"abc"[1:-1] # "b" (remove first and last element)
-"banana"[1::2] # "aaa" (select alternate elements starting at index 1)
-"banana"[4::-2] # "nnb" (select alternate elements in reverse, starting at index 4)
-```
-
-It's not allowed to define a slice expression as a left value in KCL.
-Because lists and strings are immutable, re-slicing can operate directly on the operand to ensure better performance.
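-
-As an additional hedged sketch of the clamping rules above, out-of-range bounds are clamped rather than reported as errors:
-
-```python
-"abc"[0:10] # "abc" (stop is clamped to len("abc"))
-"abc"[-10:2] # "ab" (start is clamped to 0)
-```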
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/index.md deleted file mode 100644 index 88bbaa59..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/index.md +++ /dev/null @@ -1 +0,0 @@ -# KCL Spec diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/kcl-spec.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/kcl-spec.md deleted file mode 100644 index 9318b105..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/kcl-spec.md +++ /dev/null @@ -1,300 +0,0 @@ ---- -title: "KCL Spec" -linkTitle: "KCL Spec" -type: "docs" -weight: 2 -description: KCL Spec ---- -## 词法规则 - -### 关键字和保留字 - -下面是 KCL 语言的关键字: - -```python - True False None Undefined import - and or in is not - as if else elif for - schema mixin protocol check assert - all any map filter lambda - rule -``` - -下面是 KCL 语言的保留字: - -```python - pass return validate rule flow - def del raise except try - finally while from with yield - global nonlocal struct class final -``` - -### 行注释 - -```python -# a comment -``` - -### 运算符 - -```python - + - * ** / // % - << >> & | ^ < > - ~ <= >= == != = - += -= *= **= /= //= %= - <<= >>= &= ^= -``` - -### 分隔符 - -```python - ( ) [ ] { } - , : . ; @ -``` - -### 运算符的优先级 - -以下的运算符列表根据优先级从 **高到底** 排列: - -| Operator | Description | -| ---------------------------------------------------------------- | -------------------------------------------------------- | -| `**` | Exponentiation (highest priority) | -| `+x` `-x` `~x` | Positive, negative, bitwise NOT | -| `*` `/` `%` `//` | Multiplication, division, floor division and remainder | -| `+` `-` | Addition and subtraction | -| `<<` `>>` | Left and right shifts | -| `&` | Bitwise AND | -| `^` | Bitwise XOR | -| `|` | Bitwise OR | -| `in`, `not in`, `is`, `is not`, `<`, `<=`, `>`, `>=`, `!=`, `==` | Comparisons, including membership and identity operators | -| `not` | Boolean NOT | -| `and` | Boolean AND | -| `or` | Boolean OR | -| `if – else` | Conditional expression = | -| `=`, `+=`, `-=`, `*=`, `/=`, `%=`, `&=`, `|=`, `^=`, `**=`, `//=`, `<<=`, `>>=` | Assign | - -## EBNF 语法 - -KCL 采用 Python 的 [LarkParser](https://lark-parser.readthedocs.io/en/latest/) 工具描述语法,规范规则如下: - -```bnf -// Copyright 2021 The KCL Authors. All rights reserved. - -//////////// KCL grammar //////////// -start: (NEWLINE | preamble_statement)* - -//////////// statement //////////// -preamble_statement: preamble_small_stmt | preamble_compound_stmt -preamble_small_stmt: (small_stmt | import_stmt) NEWLINE -preamble_compound_stmt: compound_stmt | schema_stmt -statement: small_stmt NEWLINE | compound_stmt -compound_stmt: if_stmt -small_stmt: assign_stmt | expr_stmt | assert_stmt - -//////////// import_stmt //////////// -import_stmt: IMPORT dot_name (AS NAME)? -dot_name: [leading_dots] identifier (DOT identifier)* -leading_dots: DOT+ - -/////////// assert_stmt //////////// -assert_stmt: ASSERT test (COMMA test)? - -//////////// if_stmt //////////// -if_stmt: IF test COLON suite (ELIF test COLON suite)* (ELSE COLON suite)? 
-suite: small_stmt NEWLINE | NEWLINE _INDENT statement+ _DEDENT - -//////////// assign_stmt //////////// -assign_stmt: primary_expr (ASSIGN primary_expr)* ASSIGN test - | primary_expr augassign test -augassign: COMP_PLUS | COMP_MINUS | COMP_MULTIPLY | COMP_DOUBLE_STAR | COMP_DIVIDE - | COMP_DOUBLE_DIVIDE | COMP_MOD | COMP_AND | COMP_OR | COMP_XOR | COMP_SHIFT_LEFT - | COMP_SHIFT_RIGHT - -//////////// schema_stmt //////////// -schema_stmt: [decorators] SCHEMA [RELAXED] identifier [LEFT_BRACKETS [arguments] RIGHT_BRACKETS] [LEFT_PARENTHESES operand_name RIGHT_PARENTHESES] COLON NEWLINE [schema_body] -schema_body: _INDENT (string NEWLINE)* [mixin_stmt] (schema_attribute_stmt | statement)* [check_block] _DEDENT -schema_attribute_stmt: [decorators] (FINAL)? identifier COLON type [(ASSIGN | augassign) test] NEWLINE - -/////////// decorators //////////// -decorators: (AT primary_expr NEWLINE)+ - -//////////// type //////////// -type: type_element (OR type_element)* -type_element: schema_type | basic_type | compound_type -schema_type: operand_name -basic_type: STRING_TYPE | INT_TYPE | FLOAT_TYPE | BOOL_TYPE -compound_type: list_type | dict_type -list_type: LEFT_BRACKETS (type)? RIGHT_BRACKETS -dict_type: LEFT_BRACE (type)? COLON (type)? RIGHT_BRACE - -//////////// check_block //////////// -check_block: CHECK COLON NEWLINE _INDENT check_expr+ _DEDENT -check_expr: check_test [COMMA primary_expr] NEWLINE -check_test: or_test [IF or_test] - -//////////// mixin_stmt //////////// -mixin_stmt: MIXIN LEFT_BRACKETS [mixins | multiline_mixins] RIGHT_BRACKETS NEWLINE -multiline_mixins: NEWLINE _INDENT mixins NEWLINE _DEDENT -mixins: operand_name (COMMA (NEWLINE mixins | operand_name))* - -//////////// expression_stmt //////////// -expr_stmt: expression -expression: test (COMMA test)* -test: if_expr | primary_expr | unary_expr | binary_expr -if_expr: test IF test ELSE test -unary_expr: (PLUS | MINUS | NOT) primary_expr | L_NOT test -binary_expr: test bin_op test -bin_op: L_OR - | L_AND - | EQUAL_TO | NOT_EQUAL_TO | LESS_THAN | GREATER_THAN | LESS_THAN_OR_EQUAL_TO | GREATER_THAN_OR_EQUAL_TO - | IN | L_NOT IN | IS | IS L_NOT - | OR - | XOR - | AND - | MINUS | PLUS - | MULTIPLY | MOD | DIVIDE | DOUBLE_DIVIDE - | SHIFT_LEFT | SHIFT_RIGHT - -primary_expr: operand | primary_expr select_suffix | primary_expr call_suffix | primary_expr subscript_suffix | primary_expr schema_expr -operand: operand_name | number | string - | TRUE | FALSE | NONE | list_expr | list_comp | dict_expr - | dict_comp | LEFT_PARENTHESES test RIGHT_PARENTHESES -operand_name: identifier | qualified_identifier - -select_suffix: DOT (identifier | dict_identifier_selector | list_identifier_selector) -dict_identifier_selector: MULTIPLY | LEFT_BRACE identifier (COMMA identifier)* RIGHT_BRACE -list_identifier_selector: subscript_suffix - -//////////// call_suffix //////////// -call_suffix: LEFT_PARENTHESES [arguments [COMMA]] RIGHT_PARENTHESES - -//////////// subscript_suffix //////////// -subscript_suffix: LEFT_BRACKETS (test | [test] COLON [test] [COLON [test]]) RIGHT_BRACKETS - -//////////// arguments //////////// -arguments: argument (COMMA argument)* -argument: test | NAME ASSIGN test | MULTIPLY test | DOUBLE_STAR test - - -//////////// operand //////////// -identifier: NAME -qualified_identifier: identifier DOT identifier - -//////////// list_expr //////////// -list_expr: LEFT_BRACKETS [list_items | NEWLINE _INDENT list_items _DEDENT] RIGHT_BRACKETS -list_items: list_item ((COMMA [NEWLINE] | NEWLINE) list_item)* [COMMA] [NEWLINE] -list_item: 
test | star_expr -list_comp: LEFT_BRACKETS (list_item comp_clause+ | NEWLINE _INDENT list_item comp_clause+ _DEDENT) RIGHT_BRACKETS -//////////// dict_expr //////////// -dict_expr: LEFT_BRACE [entries | NEWLINE _INDENT entries _DEDENT] RIGHT_BRACE -dict_comp: LEFT_BRACE (entry comp_clause+ | NEWLINE _INDENT entry comp_clause+ _DEDENT) RIGHT_BRACE -entries: entry ((COMMA [NEWLINE] | NEWLINE) entry)* [COMMA] [NEWLINE] -entry: test COLON test | double_star_expr -comp_clause: FOR loop_variables [COMMA] IN or_test [NEWLINE] [IF test [NEWLINE]] - -star_expr: MULTIPLY primary_expr -double_star_expr: DOUBLE_STAR primary_expr -loop_variables: primary_expr (COMMA primary_expr)* - -//////////// schema_expr //////////// -schema_expr: (LEFT_PARENTHESES [arguments] RIGHT_PARENTHESES)? dict_expr - -//////////// misc //////////// -number: DEC_NUMBER | HEX_NUMBER | BIN_NUMBER | OCT_NUMBER | FLOAT_NUMBER | IMAG_NUMBER -string: STRING | LONG_STRING - -// Tokens - -ASSIGN: "=" -COLON: ":" -SEMI_COLON: ";" -COMMA: "," -LEFT_PARENTHESES: "(" -RIGHT_PARENTHESES: ")" -LEFT_BRACKETS: "[" -RIGHT_BRACKETS: "]" -LEFT_BRACE: "{" -RIGHT_BRACE: "}" -PLUS: "+" -MINUS: "-" -MULTIPLY: "*" -DIVIDE: "/" -MOD: "%" -DOT: "." -AND: "&" -OR: "|" -XOR: "^" -NOT: "~" -LESS_THAN: "<" -GREATER_THAN: ">" -EQUAL_TO: "==" -NOT_EQUAL_TO: "!=" -GREATER_THAN_OR_EQUAL_TO: ">=" -LESS_THAN_OR_EQUAL_TO: "<=" -DOUBLE_STAR: "**" -DOUBLE_DIVIDE: "//" -SHIFT_LEFT: "<<" -SHIFT_RIGHT: ">>" -AT: "@" - -COMP_PLUS: "+=" -COMP_MINUS: "-=" -COMP_MULTIPLY: "*=" -COMP_DIVIDE: "/=" -COMP_MOD: "%=" -COMP_AND: "&=" -COMP_OR: "|=" -COMP_XOR: "^=" -COMP_DOUBLE_STAR: "**=" -COMP_DOUBLE_DIVIDE: "//=" -COMP_SHIFT_LEFT: "<<=" -COMP_SHIFT_RIGHT: ">>=" - -// Special tokens -IMPORT: "import" -AS: "as" -SCHEMA: "schema" -MIXIN: "mixin" -RELAXED: "relaxed" -CHECK: "check" -FOR: "for" -ASSERT: "assert" -IF: "if" -ELIF: "elif" -ELSE: "else" -L_OR: "or" -L_AND: "and" -L_NOT: "not" -IN: "in" -IS: "is" -FINAL: "final" -LAMBDA: "lambda" - -STRING_TYPE: "str" -INT_TYPE: "int" -FLOAT_TYPE: "float" -BOOL_TYPE: "bool" - -// Constant tokens -TRUE: "True" -FALSE: "False" -NONE: "None" - -NAME: /[a-zA-Z_]\w*/ -COMMENT: /#[^\n]*/ -NEWLINE: ( /\r?\n[\t ]*/ | COMMENT )+ - -STRING: /[ubf]?r?("(?!"").*?(? **note** -> -> Any character except the ASCII space, tab (`\t`) and formfeed (`\f`) is considered a none-space character. - -- A line ending in a backslash cannot carry a comment (, which will be introduced shortly afterwards). -- A backslash does not continue a comment. -- A backslash does not continue a token except for string literals (i.e., tokens other than string literals cannot be split across physical lines using a backslash). -- A backslash is illegal elsewhere on a line outside a string literal. - -### Implicit Line Joining - -Expressions in parentheses, square brackets or curly braces can be split over more than one physical line without using backslashes. - -- Implicitly continued lines can carry comments. -- The indentation of the continuation lines is not important. -- Blank continuation lines are allowed. -- There is no `NEWLINE` token between implicit continuation lines. -- Implicitly continued lines can also occur within triple-quoted strings (see below); in that case they cannot carry comments. - -### Blank Lines - -### Indentation - -### Comments - -Starting with a `#` character that is not part of a string literal is a comment. A comment ends at the end of the physical line. 
- -A comment signifies the end of the logical line unless the implicit line joining rules are invoked. - -Comments are ignored by the syntax. - -### Identifiers and Keywords - -Identifiers (also referred to as names) are described by the following lexical definitions. - -Within the ASCII range (from `U+0001` to `U+007F`), the valid characters for identifiers are the uppercase and lowercase letters `A` through `Z`, the underscore `_` and, except for the first character, the digits `0` through `9`. - -Identifiers are unlimited in length. The case is significant. - -### Keywords - -The following identifiers are used as reserved words, or keywords of the language, and cannot be used as ordinary identifiers. They must be spelled exactly as written here: - -``` -True False None Undefined import -and or in is not -as if else elif for -schema mixin protocol check assert -all any map filter final -lambda rule -``` - -The following tokens are not used, but they are reserved as possible future keywords: - -``` -pass return validate rule flow -def del raise except try -finally while from with yield -global nonlocal struct class -``` - -### Literals - -Literals are notations for constant values of some built-in types. - -### String Literals - -String literals are described by the following lexical definitions: - -``` -stringliteral ::= [stringprefix](shortstring | longstring) -stringprefix ::= "r" | "u" | "R" | "U" | "f" | "F" - | "fr" | "Fr" | "fR" | "FR" | "rf" | "rF" | "Rf" | "RF" -shortstring ::= "'" shortstringitem* "'" | '"' shortstringitem* '"' -longstring ::= "'''" longstringitem* "'''" | '"""' longstringitem* '"""' -shortstringitem ::= shortstringchar | stringescapeseq -longstringitem ::= longstringchar | stringescapeseq -shortstringchar ::= -longstringchar ::= -stringescapeseq ::= "\" -``` - -Multiple adjacent string or bytes literals (delimited by whitespace),possibly using different quoting conventions, are allowed, and their meaning is the same as their concatenation. - -### Numeric Literals - -There are two types of numeric literals: integers and floating-point numbers. - -Integer literals are described by the following lexical definitions: - -``` -integer ::= decinteger | bininteger | octinteger | hexinteger -decinteger ::= nonzerodigit (["_"] digit)* | "0"+ (["_"] "0")* -bininteger ::= "0" ("b" | "B") (["_"] bindigit)+ -octinteger ::= "0" ("o" | "O") (["_"] octdigit)+ -hexinteger ::= "0" ("x" | "X") (["_"] hexdigit)+ -nonzerodigit ::= "1"..."9" -digit ::= "0"..."9" -bindigit ::= "0" | "1" -octdigit ::= "0"..."7" -hexdigit ::= digit | "a"..."f" | "A"..."F" -``` - -Floating-point literals are described by the following lexical definitions: - -``` -floatnumber ::= pointfloat | exponentfloat -pointfloat ::= [digitpart] fraction | digitpart "." -exponentfloat ::= (digitpart | pointfloat) exponent -digitpart ::= digit (["_"] digit)* -fraction ::= "." digitpart -exponent ::= ("e" | "E") ["+" | "-"] digitpart -``` - -## Operators and Delimiters - -### Operators - -The following tokens are operators: - -``` -+ - * ** / // % -<< >> & | ^ < > -~ <= >= == != @ -``` - -### Delimiters - -The following tokens serve as delimiters in the grammar: - -``` -( ) [ ] { } -, : . ; = += --= *= **= /= //= %= -<<= >>= &= |= ^= -``` - -The period can also occur in floating-point literals. - -The following printing ASCII characters have special meaning as part of other tokens or are otherwise significant to the lexical analyzer: - -``` -' " # \ -``` - -The following printing ASCII characters are not used in KCL. 
Their occurrence outside string literals and comments is an unconditional error: - -``` -? ` -``` - -## Reference - -Since the lexical conventions of KCL is very similar to that of Python, we use the following document as the reference when writing this chapter. - -- [https://docs.python.org/3/reference/lexical_analysis.html](https://docs.python.org/3/reference/lexical_analysis.html) diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/modules.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/modules.md deleted file mode 100644 index 4c9bcbdf..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/modules.md +++ /dev/null @@ -1,618 +0,0 @@ ---- -title: "Modules" -linkTitle: "Modules" -type: "docs" -weight: 2 -description: Modules ---- -## Modules and the Import System - -KCL code is organized in **modules**. For code in one module to access the code defined in another module, a process called **importing** must be used. - -Importing is undertaken at compile-time in KCL. The advantage is to have static checking enabled. - -A regular KCL module is a file on the file system. It is required to have a `.k` suffix. - -## Packages - -To help manage modules and provide a naming hierarchy, KCL has the concept of packages. In KCL, a package maps to exactly a file system directory, and a regular module maps to a file. - -Files directly under a package are considered parts of the package, instead of individual regular modules. - -Packages can have sub-packages. - -Packages are special modules: - -- All packages in KCL are modules. -- A single-file module can never be a package. - -All modules have a name. - -Sub package names are separated from their parent package name by dots. - -To summary, a regular KCL module is a `.k` file, and a package is a directory on the file system. All `.k` files directly under the directory are included in the package, other files are ignored. If the directory has subdirectories, they become sub-packages as long as there are `.k` files underneath. - -### Intra-Package Name Space Sharing - -Inside a package, all `.k` files are considered parts of the package, instead of regular modules. Code in these files share a single name space and can access names defined in other files, without explicitly granted. - -### Package Initialization - -A package can have the initialization code. The code must exist in only one of the `.k` files under this package. The interpreter guarantees that the initialization code is executed after all definitions. - -## Searching - -The searching begins when an `import` statement is used to import a module. - -### Module Cache - -In KCL, only standard system modules are cached. When a cached module is imported, the cached version is used. In other words, KCL runtime would not create another copy of the standard system module in memory. - -However, other modules are uncached. Importing a module multiple time would create multiple instances of the module. - -### Module Names - -An `import` statement specifies the name of the module to import. The syntax is: - -``` -import -``` - -The rule to search with the module name is very simple: - -- **Step 1**: Searches the module name from the **standard system modules**, then **plugins modules**. - - See **standard system modules** and **plugins modules** for more details. If matched, the module is imported. Otherwise, continue to **Step 2**. -- **Step 2**. Whether a module name starts with a `.` is checked. 
If yes, the name is a so-called relative pathname, and we go to **Step 5**. Otherwise, continue to **Step 3**. -- **Step 3**: If the module name does not start with any `.`, then the compiler searches the nearest `root path` directory from this directory to the parent, and find the module according to the name just from the `root path`. If no `root path` is found, find the module according to the name from the folder the `.k` file including this `import` statement exists. - - **root path**: the directory contains a `kcl.mod` file. If matched, the module is imported. Otherwise, continue to **Step 4**. -- **Step 4**: Then the compiler checks if the name is the name of any library module that requires explicit loading. If matched, the library module is imported. Otherwise, continue to **Step 6**. -- **Step 5**. For relative importing, find the module according to the name from the folder the `.k` file including this `import` statement exists. Interpret leading dots using the following rule: -- One dot: Ignore. -- Tow or more dots: Suppose there are `n` leading dots, then the searching starts at `n - 1` levels above this folder. If matched, the module is imported. Otherwise, continue to **Step 6**. -- **Step 6**. Module not found, report an error. - -Do case-sensitive search when the operating system allows. If case-sensitive search is not allowed, search directories before regular files. - -In KCL, the `from <> import <>` is unsupported, and relative import is performed with the `import <>` syntax. - -### Uniqueness of Module - -Each module has a unique location path in its scope, so that a module or package could be located with a unique location path, such as `a.b.c`. - -Searching by location path should be supported by the kcl compiler, which needs to provide corresponding searching features through the command line and api form. - -## Standard System Modules - -KCL supports a few standard system modules. The following is the full list of these standard system modules: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-| Module | Members |
-| -------- | ------------------------------------------------------------ |
-| datetime | today, now, ticks, date |
-| math | ceil, exp, expm1, factorial, floor, gcd, isfinite, isinf, isnan, log, log1p, log2, log10, modf, pow, sqrt |
-| regex | replace, match, compile, findall, search, split |
-| units | n, u, m, k, K, M, G, T, P, Ki, Mi, Gi, Ti, Pi, to_n, to_u, to_m, to_K, to_M, to_G, to_T, to_P, to_Ki, to_Mi, to_Gi, to_Ti, to_Pi |
-| json | encode, decode, dump_to_file |
-| yaml | encode, decode, dump_to_file |
-| net | split_host_port, join_host_port, fqdn, parse_IP, to_IP4, to_IP16, IP_string, is_IPv4, is_IP, is_loopback_IP, is_multicast_IP, is_interface_local_multicast_IP, is_link_local_multicast_IP, is_link_local_unicast_IP, is_global_unicast_IP, is_unspecified_IP |
-| base64 | encode, decode |
-| crypto | md5, sha1, sha224, sha256, sha384, sha512 |
-
-- datetime
-  - ticks() -> float
-    Return the current time in seconds since the Epoch. Fractions of a second may be present if the system clock provides them.
-  - date() -> str
-    return the `%Y-%m-%d %H:%M:%S` format date.
-  - now() -> str
-    return the local time. e.g. `'Sat Jun 06 16:26:11 1998'`
-  - today() -> str
-    return the `%Y-%m-%d %H:%M:%S.%{ticks}` format date.
-- math
-  - ceil(x) -> int
-    Return the ceiling of x as an Integral. This is the smallest integer >= x.
-  - factorial(x) -> int
-    Return x!. Raise an error if x is negative or non-integral.
-  - floor(x) -> int
-    Return the floor of x as an Integral. This is the largest integer <= x.
-  - gcd(a: int, b: int) -> int
-    Return the greatest common divisor of a and b.
-  - isfinite(x) -> bool
-    Return True if x is neither an infinity nor a NaN, and False otherwise.
-  - isinf(x) -> bool
-    Return True if x is a positive or negative infinity, and False otherwise.
-  - isnan(x) -> bool
-    Return True if x is a NaN (not a number), and False otherwise.
-  - modf(x) -> List[float, float]
-    Return the fractional and integer parts of x. Both results carry the sign of x and are floats.
-  - exp(x) -> float
-    Return e raised to the power of x.
-  - expm1(x) -> float
-    Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.
-  - log(x) -> float
-    Return the logarithm of x to the base e.
-  - log1p(x) -> float
-    Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero.
-  - log2(x) -> float
-    Return the base 2 logarithm of x.
-  - log10(x) -> float
-    Return the base 10 logarithm of x.
-  - pow(x, y) -> float
-    Return x**y (x to the power of y).
-  - sqrt(x) -> float
-    Return the square root of x.
-- regex
-  - replace(string: str, pattern: str, replace: str, count=0) -> str
-    Return the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in string by the replacement.
-  - match(string: str, pattern: str) -> bool
-    Try to apply the pattern at the start of the string, returning a bool value True if any match was found, or False if no match was found.
-  - compile(pattern: str) -> bool
-    Compile a regular expression pattern, returning a bool value denoting whether the pattern is valid.
-  - findall(string: str, pattern: str) -> List[str]
-    Return a list of all non-overlapping matches in the string.
-  - search(string: str, pattern: str) -> bool
-    Scan through string looking for a match to the pattern, returning a bool value True if any match was found, or False if no match was found.
-  - split(string: str, pattern: str, maxsplit=0) -> List[str]
-    Split the string by the occurrences of the pattern, returning a list containing the resulting substrings.
-- units
-  - Unit constants
-    - Fixed point: `n`, `u`, `m`, `k`, `K`, `G`, `T` and `P`.
-    - Power of 2: `Ki`, `Mi`, `Gi`, `Ti` and `Pi`.
- - Functions - - to_n(num: int) -> str - Int literal to string with `n` suffix - - to_u(num: int) -> str - Int literal to string with `u` suffix - - to_m(num: int) -> str - Int literal to string with `m` suffix - - to_K(num: int) -> str - Int literal to string with `K` suffix - - to_M(num: int) -> str - Int literal to string with `M` suffix - - to_G(num: int) -> str - Int literal to string with `G` suffix - - to_T(num: int) -> str - Int literal to string with `T` suffix - - to_P(num: int) -> str - Int literal to string with `P` suffix - - to_Ki(num: int) -> str - Int literal to string with `Ki` suffix - - to_Mi(num: int) -> str - Int literal to string with `Mi` suffix - - to_Gi(num: int) -> str - Int literal to string with `Gi` suffix - - to_Ti(num: int) -> str - Int literal to string with `Ti` suffix - - to_Pi(num: int) -> str - Int literal to string with `Pi` suffix -- json - - encode(data: any, sort_keys: bool = False, indent: int = None, ignore_private: bool = False, ignore_none: bool = False) -> str - Serialize a KCL object `data` to a JSON formatted str. - - decode(value: str) -> any - Deserialize `value` (a string instance containing a JSON document) to a KCL object. - - dump_to_file(data: any, filename: str, ignore_private: bool = False, ignore_none: bool = False) -> None - Serialize a KCL object `data` to a JSON formatted str and write it into the file `filename`. -- yaml - - encode(data: any, sort_keys: bool = False, ignore_private: bool = False, ignore_none: bool = False) -> str - Serialize a KCL object `data` to a YAML formatted str. - - decode(value: str) -> any - Deserialize `value` (a string instance containing a YAML document) to a KCL object. - - dump_to_file(data: any, filename: str, ignore_private: bool = False, ignore_none: bool = False) -> None - Serialize a KCL object `data` to a YAML formatted str and write it into the file `filename`. -- net - - split_host_port(ip_end_point: str) -> List[str] - Split the 'host' and 'port' from the ip end point. - - join_host_port(host, port) -> str - Merge the 'host' and 'port'. - - fqdn(name: str = '') -> str - Return Fully Qualified Domain Name (FQDN). - - parse_IP(ip) -> str - Parse 'ip' to a real IP address - - to_IP4(ip) -> str - Get the IP4 form of 'ip'. - - to_IP16(ip) -> int - Get the IP16 form of 'ip'. - - IP_string(ip: str | int) -> str - Get the IP string. - - is_IPv4(ip: str) -> bool - Whether 'ip' is a IPv4 one. - - is_IP(ip: str) -> bool - Whether ip is a valid ip address. - - is_loopback_IP(ip: str) -> bool - Whether 'ip' is a loopback one. - - is_multicast_IP(ip: str) -> bool - Whether 'ip' is a multicast one. - - is_interface_local_multicast_IP(ip: str) -> bool - Whether 'ip' is a interface, local and multicast one. - - is_link_local_multicast_IP(ip: str) -> bool - Whether 'ip' is a link local and multicast one. - - is_link_local_unicast_IP(ip: str) -> bool - Whether 'ip' is a link local and unicast one. - - is_global_unicast_IP(ip: str) -> bool - Whether 'ip' is a global and unicast one. - - is_unspecified_IP(ip: str) -> bool - Whether 'ip' is a unspecified one. -- base64 - - encode(value: str, encoding: str = "utf-8") -> str - Encode the string `value` using the codec registered for encoding. - - decode(value: str, encoding: str = "utf-8") -> str - Decode the string `value` using the codec registered for encoding. -- crypto - - md5(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `MD5` and the codec registered for encoding. 
- - sha1(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA1` and the codec registered for encoding. - - sha224(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA224` and the codec registered for encoding. - - sha256(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA256` and the codec registered for encoding. - - sha384(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA384` and the codec registered for encoding. - - sha512(value: str, encoding: str = "utf-8") -> str - Encrypt the string `value` using `SHA512` and the codec registered for encoding. - -### The Built-in System Module - -KCL provides a list of built-in system modules, which are loaded automatically and can be directly used without providing any module name. For example, `print` is a widely used built-in module. - -The following is the full list of these built-in system modules: - -- print() - - The print function. -- multiplyof(a, b) - - Check if the modular result of a and b is 0 -- isunique(inval) - - Check if a list has duplicated elements -- len(inval) - Return the length of a value -- abs(x) - Return the absolute value of the argument. -- all(iterable) - Return True if bool(x) is True for all values x in the iterable. If the iterable is empty, return True. -- any(iterable) - Return True if bool(x) is True for any x in the iterable. If the iterable is empty, return False. -- bin(number) - Return the binary representation of an integer. -- hex(number) - Return the hexadecimal representation of an integer. -- oct(number) - Return the octal representation of an integer. -- ord(c) -> int - Return the Unicode code point for a one-character string. -- sorted(iterable) - Return a new list containing all items from the iterable in ascending order. A custom key function can be supplied to customize the sort order, and the reverse flag can be set to request the result in descending order. -- range(start, end, step=1) - Return the range of a value with start, end and step parameter. -- min(iterable) - With a single iterable argument, return its smallest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. With two or more arguments, return the smallest argument. -- max(iterable) - With a single iterable argument, return its biggest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. With two or more arguments, return the largest argument. -- sum(iterable, start) - Return the sum of a 'start' value (default: 0) plus an iterable of numbers. When the iterable is empty, return the start value. This function is intended specifically for use with numeric values and may reject non-numeric types. -- pow(x, y, z) - Equivalent to `x**y` (with two arguments) or `x**y % z` (with three arguments). Some types, such as ints, are able to use a more efficient algorithm when invoked using the three argument form. -- round(number, ndigits) - Round a number to a given precision in decimal digits. The return value is an integer if ndigits is omitted or None. Otherwise the return value has the same type as the number. ndigits may be negative. -- typeof(x: any, *, full_name: bool = False) -> str - Return the type of the value 'x' at runtime. When the 'full_name' is 'True', return the full package type name such as `pkg.schema`. 
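-
-To make the list above concrete, here is a minimal sketch that uses a few of these built-ins directly, with no import (the results assume the semantics described above):
-
-```python
-a = len([1, 2, 3]) # 3
-b = max([1, 5, 2]) # 5
-c = sorted([3, 1, 2]) # [1, 2, 3]
-d = sum([1, 2, 3]) # 6
-```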
- -### Plugin Modules - -KCL compiler needs to provide the ability to dynamically expand and load plugin modules without modifying the compiler itself. KCL compiler needs to support flexible pluggable module extension mechanism, so that KCL users can use more abundant built-in function capabilities to simplify writing. - -KCL compiler needs to ensure the stability and safety of the expansion mechanism, without affecting the core of the compiler. - -Searching extended plugin module is performed after the standard system module. The standard system module has a higher priority in naming. If it exists a standard or built-in system module with the same name, the extended plugin module will be ignored. - -Importing and using the extended plugin module should be consistent with the standard or built-in system module. - -### Replacing Standard System Modules - -Replacing standard system modules is not allowed. - -## Examples - -We show more module features through an example. - -Suppose we have the following directories and files: - -``` - . - ├── mod1.k - ├── mod2.k - ├── pkg1 - │   ├── def1.k - │   ├── def2.k - │   └── def3init.k - └── pkg2 - ├── file2.k - └── subpkg3 - └── file3.k -``` - -From the structure we can see that `pkg1` and `pkg2` are two packages, `subpkg3` is a subpackage of `pkg2`, and `mod1.k` and `mod2.k` are regular modules. - -### Importing a Standard System Module - -The following statement can import the standard system module `math` - -```python -import math -``` - -This is the only way to import a standard system module. After importing a standard system module, functions, variables and schemas defined in it can be used. For example, the following statement uses the `log10` function -defined in `math` - -```python -a = math.log10(100) # a is 2 after computation. -``` - -### Importing a Regular Module - -In `mod1.k`, we can import `mod2` using one of the following syntaxes. - -```python -import mod2 -``` - -```python -import .mod2 -``` - -The difference is that in the first syntax, the KCL compiler will first try to check if `mod2` matches any of the standard system modules' name. Since it does not match any standard system module's name, the statement will check the directory where `mod1.k` resists in, like what the second statement does. - -Suppose in `mod2.k` there is a definition of a variable:: - -```python -a = 100 -``` - -After importing `mod2`, we can access `a` in `mod1.k` using the following syntax - -```python -b = mod2.a -``` - -### Importing a Package - -In `mod1.k`, we can import `pkg1` using one of the following syntaxes. - -```python -import pkg1 -``` - -```python -import .pkg1 -``` - -The difference is that in the first syntax, the KCL compiler will first try to check if `pkg1` matches any of the standard system modules' name. Since it does not match any standard system module's name, the statement will check the directory where `mod1.k` resists in, like what the second statement does. - -We can use similar statements to import `pkg2`. Note that importing `pkg2` will not import `subpkg3`. - -The name of the package is the name of the imported module. - -Suppose in `file2.k` that is inside `pkg2` there is a definition to variable `foo` - -```python -foo = 100 -``` - -This variable can be used in `mod1.k` after importing `pkg2` like the following - -```python -bar = pkg2.foo -``` - -### Importing a Subpackage - -To import `subpkg3` from `mod1.k`, one of the following statements can be used. 
- -```python -import pkg2.subpkg3 -``` - -```python -import .pkg2.subpkg3 -``` - -The behaviors of these statements are identical. - -The name of the subpackage is the name of the imported module. - -Suppose in `file3.k` that is inside `subpkg3` there is a definition to variable `foo` - -```python -foo = 100 -``` - -This variable can be used in `mod1.k` after importing `subpkg3` like the following - -```python -bar = subpkg3.foo -``` - -### Relative Importing - -Relative importing is useful when there is code trying to import modules that does not exist recursively inside the current directory. - -For example, the following statements, if written in `file3.k`, can be used to import `pkg2`, `pkg1` and `mod2` respectively. - -```python -import ...pkg2 # Go two levels up then import pkg2 -import ...pkg1 # Go two levels up then import pkg1 -import ...mod2 # Go two levels up then import mod2 -``` - -### Importing from a Root Path - -Suppose we have a `kcl.mod` file in the directory to mark it as a root path, then we have the following files: - -``` - . - |── kcl.mod - ├── mod1.k - ├── mod2.k - ├── pkg1 - │   ├── def1.k - │   ├── def2.k - │   └── def3init.k - └── pkg2 - ├── file2.k - └── subpkg3 - └── file3.k -``` - -In `pkg1` `def1.k`, we can import `pkg2.subpkg3` `file3` using the following syntaxes. - -```python -import pkg2.subpkg3.file3 -``` - -Importing from the root path is very convenient when the code is trying to import modules from a directory needs to look up multiple directories above this directory. At also, it is helpful to organize a large number of files in a root directory. - -### Importing a Module Inside a Package - -Note that `subpkg3` is only implemented with one file `file3.k`. The file can be regarded as a regular module and imported directly. - -In `mod1.k`, the importing statement would be:: - -```python -import pkg2.subpkg3.file3 -``` - -Different from importing `subpkg3`, now the name of the module is `file3`. We can access the variable `foo` defined in this module with the following -statement - -```python -bar = file3.foo -``` - -### Precedence of Importing - -When an import statement specifies a package to import, the virtual machine first looks for a directory named according to the import statement in the file system. - -If such a directory is not found, the virtual machine looks for a single file module. - -For example, when the statement `import a.b.c` appears, the virtual machine first looks for the directory `a/b/c` from the directory of the current file. If `a/b/c` is not found, the virtual machine looks for a file named `a/b/c.k`. If the file is also absent, an error is reported. - -### Package Implemented with Multiple Files - -Package `pkg1` is implemented with multiple KCL files. - -Multiple files can be used to define variables, schemas and functions, and they can access names defined in other files of this package. - -For example, suppose `def1.k` defines a variable `foo`, `def2.k` defines `bar`, and `def3init.k` defines a variable `baz`, when `pkg1` is imported by `mod1.k`, all these variable can be used - -```python -import pkg1 -a = pkg1.foo + pkg1.bar + pkg1.baz -``` - -Inside a module, names defined in a file can be accessed in another file without further importing. 
For example, suppose `bar` in `def2.k` would invoke `foo` defined in `def1.k`, it can directly use `foo` like the following - -```python -bar = foo + 1 -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/schema.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/schema.md deleted file mode 100644 index 43a67761..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/schema.md +++ /dev/null @@ -1,915 +0,0 @@ ---- -title: "Schema" -linkTitle: "Schema" -type: "docs" -weight: 2 -description: Schema ---- -## Syntax - -### Schema Definition - -A schema is a language element to define a type of configuration data. - -To define a schema, the syntax is the following: - -``` -schema_stmt: [decorators] "schema" ["relaxed"] identifier ["[" [arguments] "]"] ["(" operand_name ")"] ":" NEWLINE [schema_body] -schema_body: _INDENT (string NEWLINE)* [mixin_stmt] (schema_attribute_stmt | schema_index_signature | statement)* [check_block] _DEDENT -``` - -Attributes could be defined in a schema, the syntax is the following: - -``` -schema_attribute_stmt: [decorators] identifier ["?"] ":" type [(ASSIGN | augassign) test] NEWLINE -``` - -Index signature could be defined in a schema, the syntax is the following: - -``` -schema_index_signature: LEFT_BRACKETS [NAME COLON] [ELLIPSIS] basic_type RIGHT_BRACKETS COLON type [ASSIGN test] NEWLINE -``` - -Once defined, an attribute must have a valid type: - -``` -type: type_element ("|" type_element)* -type_element: schema_type | basic_type | list_type | dict_type -schema_type: operand_name -basic_type: "str" | "int" | "float" | "bool" -list_type: "[" (type)? "]" -dict_type: "{" (type)? COLON (type)? "}" -``` - -The followings are some basic examples: - -```python -# A person has a first name, a last name and an age. -schema person: - firstName: str - lastName: str - # fullName is generated by firstName and lastName - fullName: str = firstName + ' ' + lastName - # The default value of age is 0 - age: int = 0 - -# An employee IS a person, and has some additional information. -schema employee(person): - bankCard: int - nationality: str - -# A company has a name and many employees. -schema company: - name: str - employees: [employee] -``` - -More complex schema definitions will be elaborated after other concepts are -introduced. - -#### Optional Attribute - -Each attribute **must** be assigned with a not-None value as a schema instance unless it is modified by a question mark as an optional attribute. - -Examples: - -``` -schema employee(person): - bankCard?: int # bankCard is an optional attribute - nationality?: str # # nationality is an optional attribute -``` - -When there is an inheritance relationship: - -+ If the attribute is optional in the base schema, it could be optional or required in the sub-schema. -+ If the attribute is required in the base schema, it must be required in the sub-schema. - -### Configuration Definition - -A configuration is structured data stored in a dict-like structure. In KCL, we have introduced -the configuration definition syntax as a variant of dict definition syntax. - -``` -schema_expr: operand_name ("(" [arguments] ")")? dict_expr -``` - -As can be seen, apart from having an identifier as schema type, a configuration definition -is just an ordinary dict definition, and each key in the dict matches an attribute in the schema. 
- -To simplify configuration, schema attribute key is much easier to define as: - -- schema attribute key can be unquoted. When the attribute key has the same name as a variable, it must be quoted as a normal dict to avoid naming conflict. -- schema attribute key can be defined nested through `select expression`, such as `a.b.c`. - -The comma at the end of each line can be omitted. - -For example, we can define a `person` named `John Doe` using the following statement: - -```python -johnDoe = person { - # In the result, 'lastName' appears later than 'firstName', according the schema - lastName = 'Doe' - firstName = 'John' - # If we don't specify the 'age', the default value 0 is used. - # 'age': 20 -} -``` - -The result is a **dict**: - -```python -{ - 'firstName': 'John' - 'lastName': 'Doe' - 'age': 0 -} -``` - -Compared to the ordinary dict definition, a configuration definition has the following features: - -- Each attribute defined in the schema (or one of the schemas) could be configured, and config data has higher priority than the default value. -- When an attribute defined in the schema (or one of the schemas) is not configured in the configuration definition statement, and it has a default value, the default value is used. -- Unless the schema (or one of the schemas) is a **relaxed schema**, no more attributes can be defined. -- The quotation marks of dict key can be omitted. -- The comma at the end of each line can be omitted. -- Cases of **inheritance** will be discussed separately. - -For attributes of list, dict and schema types, the config data is added by **union** instead of reassignment. For instance: - -```python -schema Name: - firstName: str - lastName: str - -schema Person: - name: Name = { - firstNam = "John" - lastName = "default" - } - -JohnDoe = Person { - name.lastName = "Doe" -} -``` - -The result is a **dict**: - -```python -{ - 'firstName': 'John' - 'lastName': 'Doe' -} -``` - -#### Attribute Identify - -Each key identifier in the configuration expr identifies an element or a range of elements in a schema. The key identifier may consist of multiple attribute identifiers, and each attribute may be a basic type value, a list, a dict or schema. For example, the key identifier 'a.b.c' identifies the element 'c' in the 'A' schema: - -``` - -schema C: - c: int - -schema B: - b: C - -schema A: - a: B - -A { - a.b.c: 5 -} -``` - -To make the key identifier usage rules as clear as possible, we define the way of identifying with complex data types as follows. - -##### List - -Suppose we have a list attribute a. - -Identify an element in a: - -``` -a[0] # the first element -a[3] # the 4th element -a[-1] # the last element -a[-2] # the penultimate element -``` - -Identify a range of elements in the list: - -``` -a[2:5] # a slice of the third, 4th, and 5th elements -a[:5] # a slice of the first to 5th elements -``` - -#### Attribute Operator - -Once we identified the element(s), we can declare operation on it. It follows the pattern of `identifier op E`. - -#### Union - -Pattern: `identifier : E` - -The value of the expression `E` will be unioned into the element value. - -Examples: - -``` -a = A { - # union {d:4} into the element b.c, suppose c is a schema with an int type attribute d. - b.c : { - d : 4 - } -} -``` - -See 'union' in `expressions` spec for more details. - -#### Override - -Pattern: `identifier = E` - -The value of the expression `E` will override the element value. 
- -Examples: - -``` -a = A { - # override {c:4} to the element b, suppose b is a schema with an int type attribute c. - b = { - c: 4 - } -} -``` - -Unlike union, the override operation will reassign the element with a brand new value. -For basic type value, `union` and `override` have equivalent effects. - -Note: - -+ Especially, we can "delete" its content by overriding the element to `Undefined`, such as `{ a = Undefined }`. - -#### Insert - -Pattern: `identifier += E` -Insert only works for list type `identifier`. - -List `E` will be inserted just after the specified index of the list `identifier`, and the following elements after the index will be automatically shifted. - -Examples: - -``` -a = A { - # insert {c:4} to the `index=2` position(just after index=1), suppose b is a list of schema with an int type attribute c. - b[1] += { - c: 4 - } -} -``` - -If no index is specified, the last index will be used. - -The type of 'E' must be compatible with the type of list. See `types` for more details. - -#### Index Signature - -Index signatures can be defined in the KCL schema, and it means that the key-value constraints of the index signature can be used to construct a dict with the schema type, or additional checks can be added to the relaxed schema attributes to enhance the KCL type and semantic checks. - -- Use the form `[{attr_alias}: {key_type}]: {value_type}` to define an index signature in the schema, and `{attr_alias}` can be omitted. - -```python -schema Map: - """ - Map is a relaxed schema with a key of str type and a value of str type - """ - [str]: str # `{attr_alias}` can be omitted. - -data = Map { - key1 = "value1" - key2 = "value2" -} -``` - -- Mandatory all attributes of the schema key and value types - -```python -schema Person: - name: str - age: int # error, conflicts with the index signature definition `[str]: str` - [str]: str # The values of all attributes of the schema can only be strings -``` - -- Mandatory all attribute key and value types are defined in the schema, which is equivalent to restricting all attribute types except the relaxed attributes. - -```python -schema Person: - name: str - age: int - [...str]: str # Except for the `name` and `age` attributes, the key type of all other attributes of the schema must be `str`, and the value type must also be `str`. -``` - -- Define the index signature attribute alias and use it with the check block. - -```python -schema Data: - [dataName: str]: str - check: - dataName in ["Alice", "Bob", "John"] - -data = Data { - Alice = "10" - Bob = "12" - Jonn = "8" # error Jonn not in ["Alice", "Bob", "John"] -} -``` - -```python -import regex - -schema DataMap: - [attr: str]: str - check: - regex.match(attr, r'[-._a-zA-Z0-9]+') - -data = DataMap { - key1 = "value1" - "foo.bar" = "value2" # check error -} -``` - -### Schema Context - -The schema definition space can be regarded as a separate function context. - -Init statement could be defined inside the schema, the syntax is the following: - -``` -statement: small_stmt NEWLINE | if_stmt -``` - -The following is an example: - -```python -schema Person: - firstName: str = "John" - lastName: str - # fullName is generated by firstName and lastName in a separate init statement - fullName: str = firstName + ' ' + lastName - -JohnDoe = Person { - lastName = "Doe" -} -``` - -The result is a **dict**: - -```python -{ - 'firstName': 'John' - 'lastName': 'Doe' - 'fullName': 'John Doe' -} -``` - -If statement, expr statement and assert statement are supported as a schema init -statement. 
See more in statement spec. - -+ The attributes must be defined first, including inherited ones, and then used in the init statement. -+ Statements in the schema context will be executed sequentially. -+ The value of attributes referenced in the init statement will be evaluated at runtime. - See the **Configuration Definition** section for the assignment rules of non-referenced attributes. For example, `"fullName"` in Person is generated by `"firstName"` and `"lastName"` evaluated at runtime, in which firstName is 'John', and lastName is "Doe". - -The immutability of attributes in the schema context follows the same rules as the immutability of global variables: - -```python -schema Person: - age: int = 1 # Immutable attribute - _name: str = "Alice" # Mutable attribute - - age = 10 # Error - _name = "Bob" # Ok -``` - -#### Arguments - -Schema context can also have arguments. The following is an example. - -```python -schema Person[separator]: - firstName: str = "John" - lastName: str - fullName: str = firstName + separator + lastName - -JohnDoe = Person('_') { - lastName = "Doe" -} -``` - -The example is similar to the previous one, except that the separator character used in -the `"fullName"` member is passed in as an argument. The way to perform a schema generation -when the schema has an initialization function with arguments is demonstrated in the code. - -### Check Block - -Optionally, a check block can be added to a schema definition to allow -additional checking to be performed. - -The syntax is the following: - -``` -check_block: "check" ":" NEWLINE _INDENT check_expr+ _DEDENT -check_expr: test (IF test)? [":" primary_expr] NEWLINE -``` - -In terms of grammatical definition, a check block consists of a list of conditional expressions. The following is an example: - -```python -schema employee(person): - bankCard: int - gender: str - - check: - len(str(bankCard)) == 16 - gender in ['male', 'female'], "The gender {} is unsupported".format(gender) -``` - -The ability of KCL check expressions covers the abilities that can be defined by OpenAPI spec and is aligned with the ability of logical expressions. We consider further aligning the syntax with `CEL` spec. -Whether to support `lambda expressions` is still under discussion. - -Summary: - -- A check block consists of one or more logical **expressions**. -- When defining a configuration, the expressions in the check block are evaluated - in any order. If any of the expression is `False`, an error is reported. -- A custom error message can be provided after an expression. - -### Specifying Types - -Optionally, the type of any member of a schema can be specified. As previous examples have shown. - -A member can be of a basic type, such as a string (`str`), a floating-point number (`float`), a fixed-point number (`int`) or a boolean number (`bool`). - -A member can also be of a dictionary generated from another schema. In such a case, the name of the other schema is used as the type name. - -A member can also be a list or an ordinary dict: - -- A list with unspecified type of elements is `[]`. -- A list with elements of type `t` is `[t]`. Here `t` is another type. -- A dict with keys of type `kt` and values of type `vt` is `{kt:vt}`. -- `kt`, `vt` or both of them can be missing, like a list with unspecified type of elements. - -The followings are some more examples: - -- A list of lists of strings: `[[str]]`. -- A dict of keys with the type string and unspecified value types: `{str:}`. 
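Putting these notations together, a schema along the following lines shows how basic, list, dict and schema-typed members can be declared side by side (the schema and attribute names are purely illustrative):

```python
schema Endpoint:
    host: str
    port: int

schema Service:
    name: str                # a basic string member
    replicas: int = 1        # a basic integer member with a default value
    tags: [str]              # a list with elements of type str
    labels: {str:str}        # a dict with str keys and str values
    matrix: [[str]]          # a list of lists of strings
    extra: {str:}            # str keys, unspecified value types
    endpoint: Endpoint       # a member typed by another schema
```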
- -A member can be a **union type** defined by `|`, such as `a | b`, which means the type of the member could be a or b. - -A union type can include types of `int`, `str`, `float`, `bool`, `list` and `dict` and support type nesting e.g. `{str:str|int}` and `[[int|str]|str|float]`, etc. - -Examples: - -```python -schema x: - p: int | str # p could be defined as a int or string -``` - -### Immutability - -KCL pursues strict immutability of schema attributes. It's generally followed the rules: - -- For the attributes of the basic type, such as string, int and float, it's allowed to be reassigned - through the init statement in **schema context** or by the **configuration definition**. -- For the attributes of list, dict and schema type, it's allowed to be reassigned only by the init statement in **schema context**. The content of it is allowed to be operated in **schema context** or by the **configuration definition**. -- Any other attempt to reassign or modify schema attribute will report an error. - -#### Assign by Value - -When using a schema variable to assign the value to another variable, we can only get a deep copy of its value, not a pointer or reference. That is, modifying the assigned value will not change the assigned schema variable. - -```python -schema Person: - name: str - -person = { - name = "Alice" -} -personCopy = person # 'personCopy' is a deep copy of 'person' and modifying 'personCopy' will not affect 'person' -``` - -### Union Operator - -For list, dict and schema, we can union delta to existing data. For example: - -```python -schema Name: - firstName: str - lastName: str - -schema Person: - name: Name = { - firstName = "John" - } - - # union a schema and a dict - name: Name { - lastName = "Doe" - } - -person = Person {} -``` - -The result is a **dict**: - -```python -{ - 'person': { - 'name': { - 'firstName': 'Jhon', - 'lastName': 'Doe' - } - } -} -``` - -### Other Operators - -Except for `assignment` and `union assignment`, it's not support other operators on schema type data. -Report an error if trying to use other operators on schema type data. - -### Deprecated - -The schema attribute can be marked as deprecated once it's considered invalid. - -```python -schema Person: - @deprecated(version="1.1.0", reason="use fullName instead", strict=True) - name: str - ... # Omitted contents - -person = Person { - # report an error on configing a deprecated attribute - name = "name" -} -``` - -- Deprecated attributes cannot be configured under any circumstances. Report an error or warning once the attribute is assigned. -- Define the expired version of the attribute through **version**, and define the reason for the attribute expired through **reason**. -- When strict is true, the attribute assignment will cause an error, otherwise it will report a warning and ignore the attribute assignment. - -### Composition - -The composition is a common way to define complex structures. KCL provides simplified means for the configuration definition of combined structures. - -Assuming we have the following schemas, which is defined by a combination of multiple schemas. - -```python -schema Name: - firstName: str - lastName: str - -schema Person: - name: Name - age: int - -schema Group: - name: str - persons: [Person] -``` - -To config a group: - -```python -group = Group { - name = "group" - persons = [{ - name = { - firstName = "John" - lastName = "Doe" - } - age = 24 - }] -} -``` - -- Top-level schema name is required to config a schema. 
-- The schema of the attributes in the schema can be omitted. - -Multi-level nested schemas will make the configuration verbose. KCL supports defining attributes in the schema through `selector expression`. The selector form is **x.y.z**, see the following example: - -```python -group = Group { - name = "group" - persons = [{ - name.firstName = "John" - name.lastName = "Doe" - age = 24 - }] -} -``` - -- Selector can be used to represent attribute in a schema - -### Inheritance - -Inheritance is an effective means to define a hierarchical structure definition, and KCL supports limited **single inheritance** of the schema. - -```python -schema Person: - firstName: str - lastName: str - -# schema Scholar inherits schema Person -schema Scholar(Person): - fullName: str = firstName + '_' + lastName - subject: str - -JohnDoe = Scholar { - firstName = "John", - lastName = "Doe", - subject = "CS" -} -``` - -The result is a **dict**: - -```python -{ - 'JohnDoe': { - 'firstName': 'John' - 'lastName': 'Doe' - 'fullName': 'John Doe' - 'subject': 'CS' - } -} -``` - -Each schema can be treated as a separated function context. Statements, including attribute statements and init statements, in the context of schemas will be evaluated from base schema to subschema according to the inheritance order. Each schema context is evaluated only once sequentially. The same goes for expressions in the check block. In the example, firstName and lastName are configured in the context of Person schema, and fullName is formed by splicing firstName and lastName in the context of Scholar schema. - -The default value can be modified in each schema. Value defined in **Configuration Definition** has a higher priority than the default value. Attributes with default values in any schema context ​​will eventually be unioned by configuration data. References to attributes in the schema context statements will use the value with unioned configuration data on evaluating at runtime. For example: - -```python -schema a: - x = 1 - y = x * 2 - -schema b(a): - x = 2 - -v = a { - x = 3 -} - -``` - -The result is a **dict**: - -```python -{ - 'v': { - 'x': 3 - 'y': 6 - } -} -``` - -Notes: - -- Report an error if inheriting more than one base schema. -- The type of the base schema attribute cannot be modified in the subschema. -- Report an error if inheriting a **mixin**. -- Report an error when a circular dependency occurs. - -Limitations: - -Since inheritance will derive some complex demands, we are cautious about these complex demands. There are still some restrictions on inheritance, and it's still under discussion. - -- KCL provides limited and deterministic polymorphism support, more complex and flexible polymorphism support, such as **self**, **super** keywords, are temporarily not included in the schema definition. -- Currently, KCL only supports the polymorphism of the inherited attributes of the schema, and does not support the polymorphism of the expressions in the check block. -- For the case of multiple levels of schema inheritance, the schema arguments can only be passed to the last level of sub-schema. - -### Mixin - -In addition to **composition** and **inheritance**, KCL supports declarative reuse of schema code through the **mixin** mechanism. To use a mixin, we only need to declare the **mixin** in the schema definition. 
- -The **mixin** syntax is the following: - -``` -//////////// mixin_stmt //////////// -mixin_stmt: "mixin" "[" [mixins | multiline_mixins] "]" "\n" -multiline_mixins: "\n" _INDENT mixins "\n" _DEDENT -mixins: operand_name ("," ("\n" mixins | operand_name))* -``` - -Here is a simple example: - -```python -schema Person: - mixin [FullNameMixin] - firstName: str = "default" - lastName: str - -schema FullNameMixin: - fullName: str = "{} {}".format(firstName, lastName) - -JohnDoe = Person { - firstName = "John" - lastName = "Doe" -} -``` - -The result is a **dict**: - -```python -{ - 'JohnDoe': { - 'firstName': 'John' - 'lastName': 'Doe' - 'fullName': 'John Doe' - } -} -``` - -Multiple mixins can be added to a single schema, and mixins context will be evaluated after the host schema context at runtime. In the inheritance scenario, the mixin context can be regarded as a part of the host schema context, and the overall evaluation of schema context order is not affected. - -Notes: - -- The name of **mixin** schema must end with 'Mixin', otherwise an error will be reported. -- The attributes referenced in the **mixin** must be defined in the **mixin** itself or host schema, otherwise an error will be reported. - -### Protocol - -In addition to schema, an additional type definition method `protocol` is provided in KCL, and its properties are as follows: - -- In a protocol, only attributes and their types can be defined, complex logic and check expressions cannot be written, and mixins cannot be used. -- A protocol can only inherit or refer to other protocols, but cannot inherit or refer to other schemas. - -We can use **protocol** to add an optional host type to the dynamically inserted **mixin**. - -The **mixin** can define its host type through the `for` keyword, and internally it will query the type corresponding to the attribute from the host type. - -```python -protocol DataProtocol: # A mixin host type - data: str - -mixin DataMixin for DataProtocol: # Using the `for` keyword to define a mixin host type - x: int = data # The type of `data` is `str`, which is from `data` of `DataProtocol` -``` - -In `DataMixin`, the `data` attribute is obtained according to the `DataProtocol` host type as `str` type, and then a type error will occur when the value is assigned to `x` of type `int`: - -```python -protocol DataProtocol: - data: str - -mixin DataMixin for DataProtocol: - x: int = data # Error: expect int, got str - x: str = data # Ok -``` - -Please note that the host type **protocol** can only be used for **mixin** definitions (the suffix name is `Mixin`), otherwise an error will be reported. - -```python -protocol DataProtocol: - data: str - -schema Data for DataProtocol: # Error: only schema mixin can inherit from protocol - x: str = data -``` - -### Schema Context Evaluation - -The schema definition is composed of attribute statements, configuration data, init statements, mixins, and checks. 
In a separate schema context, the evaluation top-down order is as follows: - -``` -|------------------------------------------| -| attribute defaulting | -|------------------------------------------| -| configuration union | -|------------------------------------------| -| attribute templating | -|------------------------------------------| -| init statements in declaration order | -|------------------------------------------| -| mixins in declaration order | -|------------------------------------------| -| check expressions in any order | -|------------------------------------------| -``` - -In the case of schema inheritance, each schema context is evaluated from the base schema in the order of inheritance, and each context is evaluated only once. -Suppose there are schemas a, b, and c, where c inherits b and b inherits a. Schema contexts will be evaluated in top-down order as: - -``` -|-----------------| -| schema a | -|-----------------| -| schema b | -|-----------------| -| schema c | -|-----------------| -``` - -### Members - -Built-in function and members of schema - -+ instances() - Return the list of existing instances of a schema. - -### Irrelevant Order Calculation - -The irrelevant order calculation in the schema indicates the reference relationship between the internal attributes of the schema. For example, when we declare an expression of the form `a = b + 1`, the calculation of the value of `a` depends on the calculation of the value of `b`. When the compiler calculate the value of `a` and the value of `a` depends on the value of `b`, the compiler will choose to first calculate the value of `b`, and then calculate the value of a according to the expression `a = b + 1`, which is slightly different from the calculation method of traditional procedural language the difference. - -Since the calculation of values in the schema is based on dependencies, just like a directed acyclic graph traverses each node in the graph according to the order of topological sorting, the order of declaration of attributes in the schema is not so important, so the feature is called the irrelevant order calculation. - -Please note that there can be no circular references between different schema attribute values. - -We can see this feature through the following examples. 
- -```python -schema Person: - name?: str - age: int = _age - - _age = 10 - - if name == "Son": - _age = 18 - -schema Son(Person): - name: str = "Son" - -person = Person {} -son = Son {} -``` - -The output is - -```yaml -person: - name: null - age: 10 -son: - name: Son - age: 18 -``` - -Besides, we can achieve KCL polymorphism such as - -```python -schema Person: - name?: str - _age: int = _age - - _age = 10 - if name == "Son": - _age = 18 - elif name == "SonConf": - _age = 24 - -schema Son(Person): - name: str = "Son" - -person = Person() {} -son = Son() { - name = "SonConf" -} -``` - -The output is - -```yaml -person: - name: null - age: 10 -son: - name: SonConf - age: 24 -``` - -More examples: - -```python -schema Fib: - n1: int = n - 1 - n2: int = n1 - 1 - n: int - value: int = _value - - if n <= 2: - _value = 1 - else: - _value = (Fib {n = n1}).value + (Fib {n = n2}).value - -fib8 = (Fib {n = 8}).value -``` - -The output is - -```yaml -fib8: 21 -``` - -As in the above examples, we can see that in the schema, we only need to simply specify the dependency between attributes, and the compiler will automatically calculate the value based on the dependency, which can help us save a lot of boilerplate code and reduce configuration difficulty of writing. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/statements.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/statements.md deleted file mode 100644 index 75b84cd6..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/statements.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: "Statements" -linkTitle: "Statements" -type: "docs" -weight: 2 -description: Statements ---- -## Syntax - -In KCL, statements consist of small statements and compound statements. The syntax is the following: - -``` -preamble_statement: preamble_small_stmt | preamble_compound_stmt -preamble_small_stmt: (small_stmt | import_stmt) NEWLINE -preamble_compound_stmt: compound_stmt | schema_stmt -statement: small_stmt NEWLINE | compound_stmt -compound_stmt: if_stmt -small_stmt: assign_stmt | expr_stmt | assert_stmt -``` - -The preamble statement is used to define the module level statements, consist of `statement`, `import_stmt`, and `schema_stmt`. The statement is used to define the block level statements, which are used in the `if` statement and `schema` statement. - -### Small Statements - -A small statement is comprised of a single logical line. Multiple statements in one-line are not allowed. - -#### Assignment Statements - -Generally, assign_stmt is divided into assignment and augmented assignment. The syntax is the following: - -``` -assign_stmt: target_primary ("=" target_primary)* "=" test | target_primary augassign test -augassign: "+=" | "-=" | "*=" | "**=" | "/=" | "//=" | "%=" | "&=" | "|=" | "^=" | "<<=" | ">>=" | "or" | "and" -target_primary: identifier | target_primary DOT identifier -``` - -An assignment statement has the form `lhs = rhs`. It evaluates the expression on the right-hand side then assigns its value (or values) to the variable (or variables) on the left-hand side. - -The **target_primary** on the left-hand side is an `identifier` or an `identifier` followed by select dots. - -Note: When using **target_primary** will cause collisions, use **primary_expr** as an alternative. - -Examples: - -```python -k = 1 -a.b = "a.b" -``` - -To keep it simple, the compound target is not supported as **target_primary**. 
- -The right value of an assignment statement is a conditional expression, which is discussed separately. - -An augmented assignment, which has the form `lhs op= rhs` updates the variable `lhs` by applying a binary arithmetic operator op (one of +, -, *, /, //, %, &, |, ^, <<, >>) to the previous value of `lhs` and the value of `rhs`. - -The **target_primary** on the left-hand side is the same as assignment statement. Examples: - -```python -_x -= 1 -_filename += ".k" -``` - -There is no concept of in-place modification in KCL. The `augassign` statement will modify a copy of the **target_primary** and assign the copy to **target_primary**. - -In particular, in KCL, the `|=` symbol represents the **union** operation, which is defined as follows: - -- The behavior of the **union** operation needs to be consistent with the behavior of the **configuration definition**. - -See **expressions** spec for more details of union operator in **Arithmetic Operations**. - -#### Expression Statements - -An expression statement evaluates an expression and discards its result. - -Syntax: - -``` -expr_stmt: expression -``` - -An expression statement supported in KCL is function invocation expression, which is discussed in **expression** spec. - -```python -print(k) # print a variable -``` - -#### Import Statements - -Import statements are used to **search** and **load** a module, and define a name or names in the local namespace for the scope where the import statement occurs. - -Syntax: - -``` -import_stmt: "import" dot_name ("as" NAME)? -dot_name: [leading_dots] identifier (DOT identifier)* -leading_dots: "."+ -``` - -Examples: - -```python -import math # import a built-in module math -import pkg # import pkg -import pkg.foo # import pkg.foo -import pkg.subpkg # import a subpkg in a pkg -import .pkg2.subpkg3 # import a subpkg in a pkg inside of current pkg -import ...pkg2 # Go two levels up then import pkg2 -``` - -See **module** spec for more details of module spec. - -#### Assert Statements - -Assert statements are a convenient way to insert debugging assertions into KCL code. - -The syntax is the following: - -``` -assert_stmt: ASSERT test ("," test)? -``` - -The conditional expression in assert will be evaluated and get a boolean. Report an error if returning a `False`. - -Examples: - -```python -assert: x > 1 # report an error on x <= 1 -``` - -#### Conditional Statements - -KCL allows using conditional statements to control the instructions to -be executed. They are also called the control-flow statements. - -The only type of control-flow syntax is the well-known `if-elif-else` syntax. - -The syntax of the `if-elif-else` statement is the following. - -``` -if_stmt: "if" test ":" suite ("elif" test ":" suite)* (ELSE ":" suite)? -suite: small_stmt | NEWLINE _INDENT statement+ _DEDENT -``` - -An `if` or `elif` statement evaluates a given expression. When the expression -is evaluated to `True`, a list of statements following `:` are executed. - -The following is an example: - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - print("maybe a is negative") -else: - print("a >= 100") -``` - -`if-elif-else` statements can be nested. For example: - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - if a < 0: - print("a is negative") - print("No matter a is negative or positive, this message is printed") -else: - print("a >= 100") -``` - -#### Schema Statements - -Schema statements are used to define a type of configuration data. 
The syntax is the following: - -``` -schema_stmt: [decorators] "schema" ["relaxed"] identifier ["[" [arguments] "]"] ["(" operand_name ")"] ":" NEWLINE [schema_body] -schema_body: _INDENT (string NEWLINE)* [mixin_stmt] (schema_attribute_stmt | statement)* [check_block] _DEDENT -``` - -See **schema** spec for more details of schema spec. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/variables.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/variables.md deleted file mode 100644 index 73e7ec3e..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/spec/variables.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "Variables" -linkTitle: "Variable" -type: "docs" -weight: 2 -description: Variable ---- -In KCL, variables can be defined using assign statements. For example, the following statement defines a variable `spam` to a string `"ham"`. - -```python -spam = "ham" -``` - -There are two types of variables, which are global variables and list comprehension local variables. - -- A global variable is defined not within any context. -- A comprehension local variable is defined inside a comprehension. - -A variable can be used after definition, until the end of the current scope. - -For a global variable, the scope is the module it is defined in. Note that a module can consists of multiple source files. - -For a list comprehension local variable, the scope is the list comprehension it is defined in. - -More information on modules, list comprehensions and scopes will be discussed in later chapters. - -## Immutability - -Global variables are immutable. In other words, once defined such a variable cannot be redefined (or, i.e., modified). - -The following code is illegal, and KCLVM will report an error during evaluation. - -```python -spam = "ham" -spam = "eggs" # Error: The immutability rule is violated! -``` - -- A variable starts with the `_` character is mutable. - -```python -_spam -cond = True -if cond: - _spam = "ham" -else: - _spam = "eggs" -``` - -## Variable Exporting - -As shown in the preview chapter, KCLVM is able to export evaluation results to the standard output according to a target data format. - -The rules are the followings: - -- Living global variables at the end of an evaluation will be dumped out. -- If the name of a variable starts with the `_` character, it will not be dumped out. - -## Uniqueness of Exported Variable Identifier - -Each exported variable identifier must be unique in its package, so that an exported variable could be located uniquely by package location path and variable identifier, such as 'a.b.c:var', in which 'a.b.c' locates a package. - -Two variable identifiers are different if: - -- they are spelled differently -- they are defined in different packages and are not compiled in a single execution - -Identifying an exported variable should be supported by the kcl compiler, which needs to provide corresponding identifying features through the command line and api form. 
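As a small illustration of the exporting rules above (the variable names are illustrative, and the file below is assumed to be evaluated on its own), only the variable without the `_` prefix appears in the result:

```python
_staging = "ham"     # starts with `_`: mutable and not dumped out
_staging = "eggs"    # re-assignment of a `_` variable is allowed
meal = _staging      # exported: dumped out and immutable once defined
```

The evaluation result contains the single exported variable:

```yaml
meal: eggs
```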
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/tour.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/tour.md deleted file mode 100644 index bedd6e18..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/lang/tour.md +++ /dev/null @@ -1,3211 +0,0 @@ ---- -title: "KCL 之旅" -sidebar_position: 1 ---- - -本文展示了如何使用 KCL 的核心特性,包含变量、运算符、schema 和库,前提是您有使用其他语言编程的经验。KCL 主要受 Python 启发,了解 Python 对学习 KCL 非常有帮助。 - -### 重要概念 - -在学习 KCL 语言时,请牢记以下事实和概念: - -- KCL 是一种配置策略语言。它为编写配置和策略提供了简单且自洽的语言设计和库支持。它不能用于应用程序开发或其他通用编程语言(GPL)支持的场景。 -- KCL 吸收了经典 **OOP** 的元素,并且提供了**类型**、**复用**和**合并**等简单、开发人员友好、可靠且利于传播的配置编写实践。 -- KCL 更倾向于**不可变性**,建议使用**合并**来添加增量的变更。不可变性降低了副作用,例如不可预测的问题。 -- KCL 的 **schema** 结构体定义了严格的属性和静态类型,并且支持表达式验证。**schema** 结构体主要由带类型的属性、schema 上下文和检查块构成。 -- KCL 的 **config** 是一个类 **JSON** 表达式,通过它我们可以复用 schema 的完整定义。KCL 通过分离 schema 和 config 来提供定义和配置的能力。 -- KCL 的 **rule** 是一个书写规则约束表达式的结构,可用于数据校验和策略编写。 -- KCL 的代码文件以包(目录)和模块(文件)的形式进行管理。同一包中的 schema 彼此可见;跨包的数据需要通过 **import 语句**导入。包级变量虽然可以导出,但是它们不能被其他包修改。 -- KCL 语法定义主要使用声明式表达式,并且只提供少量必要的声明式语句,例如 import、 if...else、assert、assignment 以及 schema。 -- 没有主函数,每个 `.k` 文件可以作为单独的配置文件执行。 -- 支持**内置函数**和**插件**以简化编写。 - -### 关键字 - -下表列出了 KCL 语言的关键字。 - -``` - True False None Undefined import - and or in is not - as if else elif for - schema mixin protocol check assert - all any map filter lambda - rule -``` - -### 标识符 - -在 KCL 中, 标识符是标识一个值的名称,可以带有选择器。 - -- 标识符由字母、数字、下划线或前缀 `$` 组成。 -- 标识符不能与关键字重复,除非它们有 `$` 前缀。 -- 标识符不得包含任何嵌入的空格或符号。 -- 可以在标识符中的任何位置使用字母和下划线。 -- 数字不能放在标识符的第一位。 -- `$` 字符只能放在标识符的第一个位置。 - -示例: - -```python -x -a -b1 -b_2 -_c -$if -``` - -为了简化限定标识符(例如 `pkg.type`)的定义,我们还定义了 `qualified identifier`: - -示例: - -```python -pkg.a -``` - -在 `qualified identifier` 中的包名必须通过 `import` 关键字导入。 - -#### 标识符前缀 - -使用 `$` 前缀符号定义关键字标识符。 - -```python -$if = 1 -$else = "s" -``` - -请注意,非关键字标识符是否有 `$` 符号都是同样的效果。 - -```python -_a = 1 -$_a = 2 # equal to `_a = 2` -``` - -### 变量 - -以下是如何创建并实例化变量的例子: - -```python -name = "Foo" # Declare a variable named `name` and its value is a string literal "Foo" -``` - -它对应了如下 YAML 输出: - -```yaml -name: Foo -``` - -在 KCL 中,我们可以通过定义包级变量将变量导出为配置数据。使其直接、清晰、可维护。导出的变量是不可变的。因此一旦声明它,就无法对其进行修改,例如,假设我们有一个名为 `example.k` 的配置文件,变量 `name` 在声明后就禁止修改,就像标准的命令式语言一样。 - -```python -name = "Foo" # exported - -... - -name = "Bar" # error: a exported declaration variable can only be set once. -``` - -作为补充,我们可以在模块级别定义一个非导出变量,这个变量是可变的,不会显示在 YAML 输出当中。 - -```python -_name = "Foo" # _ variables are not output to YAML and are mutable -_name = "Bar" -``` - -请注意,变量的名称不能为 `True`、`False`、`None` 或者 `Undefined`,因为它们与 KCL 内置的名称常量之间存在二义性。 - -```python -False = 1 # Error -True = False # Error -None = Undefined # Error -Undefined = None # Error -``` - -### 内置类型 - -KCL 支持以下类型: - -- 数字 -- 字符串 -- 布尔 -- 列表 -- 字典 - -#### 数字 - -KCL 的数字类型有两种形式: - -- 64 位有符号整数。值的范围为 -9223372036854775808~9223372036854775807. -- 64 位浮点数,遵循 IEEE 754 标准。我们不建议在配置中使用 float 类型,我们可以使用字符串代替并在运行时进行解析。 - -整数和浮点数都支持基本运算符,例如 `+`,`-`,`/` 和 `*`,而复杂的运算,例如 `abs()`, `ceil()` 和 `floor()`,都是通过内置的数学库来支持。 - -整数是不带小数点的数字。以下是一些定义整数的例子: - -```python -a = 1 -b = -1 -c = 0x10 # hexadecimal literal -d = 0o10 # octal literal -e = 010 # octal literal -f = 0b10 # binary literal -g = int("10") # int constructor -``` - -如果一个数字包含小数点,则它是浮点数。以下是一些浮点数的示例: - -```python -a = 1.10 -b = 1.0 -c = -35.59 -d = 32.3e+18 -f = -90. 
-h = 70.2E-12 -i = float("112") # float constructor -``` - -内置数学库可用于数字类型: - -```python -import math - -assert abs(-40) == 40 -assert round(70.23456) == 70 -assert min(80, 100, 1000) == 80 -assert max(80, 100, 1000) == 1000 -assert sum([0,1,2]) == 3 -assert math.ceil(100.12) == 101.0 -assert math.floor(100.12) == 100.0 -assert math.pow(100, 2) == 10000.0 -``` - -KCL 默认使用 64 位数字类型。我们可以在 KCL 命令行使用 `-r` 参数执行严格的 32 位范围检查。 - -``` -kcl main.k -r -d -``` - -请注意,为了性能考虑该功能只能在 `debug` 模式中使用。 - -##### 单位字面值 - -在 KCL 中,我们可以给一个整数添加如下的单位后缀,这不影响它的真实值。 - -- 通用整形和定点数: `P`, `T`, `G`, `M`, `K`, `k`, `m`, `u`, `n` -- 2 的幂: `Pi`, `Ti`, `Gi`, `Mi`, `Ki` - -```python -# SI -n = 1n # 1e-09 -u = 1u # 1e-06 -m = 1m # 1e-03 -k = 1k # 1000 -K = 1K # 1000 -M = 1M # 1000000 -G = 1G # 1000000000 -T = 1T # 100000000000 -P = 1P # 1000000000000000 -# IEC -Ki = 1Ki # 1024 -Mi = 1Mi # 1024 ** 2 -Gi = 1Gi # 1024 ** 3 -Ti = 1Ti # 1024 ** 4 -Pi = 1Pi # 1024 ** 5 -``` - -此外,我们还可以使用定义在 `units` 模块中的单位常量: - -```python -import units - -n = 1 * units.n # 1e-09 -u = 1 * units.u # 1e-06 -m = 1 * units.m # 1e-03 -k = 1 * units.k # 1000 -K = 1 * units.K # 1000 -M = 1 * units.M # 1000000 -G = 1 * units.G # 1000000000 -T = 1 * units.T # 1000000000000 -P = 1 * units.P # 1000000000000000 -# IEC -Ki = 1 * units.Ki # 1024 -Mi = 1 * units.Mi # 1024 ** 2 -Gi = 1 * units.Gi # 1024 ** 3 -Ti = 1 * units.Ti # 1024 ** 4 -Pi = 1 * units.Pi # 1024 ** 5 -``` - -我们还可以使用定义在 `units` 模块内的整数和单位字符串之间的转换函数 - -```python -import units -# SI -K = units.to_K(1000) # "1K" -M = units.to_M(1000000) # "1M" -G = units.to_G(1000000000) # "1G" -T = units.to_T(1000000000000) # "1T" -P = units.to_P(1000000000000000) # "1P" -# IEC -Ki = units.to_Ki(1024) # "1Ki" -Mi = units.to_Mi(1024 ** 2) # "1Mi" -Gi = units.to_Gi(1024 ** 3) # "1Gi" -Ti = units.to_Ti(1024 ** 4) # "1Ti" -Pi = units.to_Pi(1024 ** 5) # "1Pi" -``` - -```python -import units -# SI -K = units.to_K(int("1M")) # "1000K" -M = units.to_M(int("1G")) # "1000M" -G = units.to_G(int("1T")) # "1000G" -T = units.to_T(int("1P")) # "1000T" -P = units.to_P(int("10P")) # "10P" -# IEC -Ki = units.to_Ki(int("1Mi")) # "1024Ki" -Mi = units.to_Mi(int("1Gi")) # "1024Mi" -Gi = units.to_Gi(int("1Ti")) # "1024Gi" -Ti = units.to_Ti(int("1Pi")) # "1024Ti" -Pi = units.to_Pi(int("10Pi")) # "10Pi" -``` - -单位类型定义在 `units` 模块中,单位类型的值不能进行任何四则运算。 - -```python -import units - -type NumberMultiplier = units.NumberMultiplier - -x0: NumberMultiplier = 1M # Ok -x1: NumberMultiplier = x0 # Ok -x2 = x0 + x1 # Error: unsupported operand type(s) for +: 'number_multiplier(1M)' and 'number_multiplier(1M)' -``` - -我们可以使用 `int()`、`float()` 和 `str()` 函数将数值单位类型转换为数字类型或字符串类型。 - -```python -a: int = int(1Ki) # 1024 -b: float = float(1Ki) # 1024.0 -c: str = str(1Mi) # "1Mi" -``` - -#### 字符串 - -字符串是一个不可变的 Unicode 字符序列。我们可以使用单引号或双引号创建字符串: - -```python -'allows embedded "double" quotes' # Single quotes -"allows embedded 'single' quotes" # Double quotes -'''Three single quotes''', """Three double quotes""" # Triple quoted -``` - -三引号用于定义多行字符串。 - -```python -"""This is a long triple quoted string -may span multiple lines. -""" -``` - -请注意,KCL 的单引号和双引号字符串的使用几乎没有区别。唯一可以简化的是,我们不需要在单引号字符串中转义双引号,也不需要在双引号中转义单引号。 - -```python -'This is my book named "foo"' # Don’t need to escape double quotes in single quoted strings. -"This is my book named 'foo'" # Don’t need to escape single quotes in double quoted strings. -``` - -我们可以使用 `+` 操作符连接字符串: - -```python -x = 'The + operator ' + 'works, as well.' 
-``` - -我们可以使用 `str` 内置函数将 int 或 float 转为字符串: - -```python -x = str(3.5) # "3.5" -``` - -可以使用很多内置的字符串函数: - -```python -x = "length" -assert len(x) == 6 # True -assert x.capitalize() == "Length" -assert x.count("gt") == 1 -assert x.endswith("th") == True -assert x.find("gth") == 3 -assert "{} {}".format("hello", "world") == 'hello world' -assert x.index("gth") == 3 -assert x.isalnum() == True -assert x.isalpha() == True -assert "123456".isdigit() == True -assert x.islower() == True -assert " ".isspace() == True -assert "This Is Title Example".istitle() == True -assert x.isupper() == False -assert "|".join(["a", "b", "c"]) == "a|b|c" -assert "LENGTH".lower() == "length" -assert ' spacious '.lstrip() == 'spacious ' -assert x.replace("th", "ht") == "lenght" -assert "lengthlength".rfind("le") == 6 -assert "lengthlength".rindex("le") == 6 -assert "length length".rsplit() == ["length", "length"] -assert "length ".rstrip() == "length" -assert "length length".split() == ["length", "length"] -assert 'ab c\n\nde fg\rkl\r\n'.splitlines() == ['ab c', '', 'de fg', 'kl'] -assert "length".startswith('len') == True -assert "***length***".strip('*') == "length" -assert "length length".title() == "Length Length" -assert x.upper() == "LENGTH" -``` - -格式化字符串有两种使用方法: 使用 `"{}".format()` 内置函数, 或者使用花括号指定变量并使用 `$` 标记取变量值。在 KCL 中叫做**插值字符串**。在下面的例子中,`a` 和 `b` 的值都是 `"hello world"`。 - -此外,要序列化的变量可以以特殊的数据格式提取,例如 YAML 或 JSON。在这种情况中,`#yaml` 或 `#json` 可以包含在花括号中。 - -具体来说,当 `$` 符号本身需要出现在**插值字符串**中,需要使用 `$$` 转义。或者使用 `+` 符号连接 `$` 符号和插值字符串来避免转义。在以下示例中,`c` 和 `c2` 的值都是 `$hello world$`。 - -```python -world = "world" -a = "hello {}".format(world) # "hello world" -b = "hello ${world}" # "hello world" -c = "$$hello ${world}$$" # "$hello world$" -c2 = "$" + "hello ${world}" + "$" # "$hello world$" - -myDict = { - "key1" = "value1" - "key2" = "value2" -} - -d = "here is myDict in json: ${myDict: #json}" -# d: 'here is myDict in json: {"key1": "value1", "key2": "value2"}' - -e = "here is myDict in yaml:\n${myDict: #yaml}" -# e: | -# here is myDict in yaml: -# key1: value1 -# key2: value2 -``` - -此外,我们可以在上面的示例代码输出 **YAML 字符串** 中看到一些符号,例如 `|`、`>`、`+`、`-`。 - -- `|` 表示 **块文字样式**,指示块内换行符的行为方式。 -- `>` 表示块标量中的**块折叠样式**,换行符将被空格替换。 -- `+` 和 `-` 是 **block chomping 指示符**,用于控制字符串末尾的换行符。 默认值 **clip** 在字符串的末尾放置一个换行符。 要删除所有换行符,请通过在样式指示符 `|` 或 `>` 后面添加 `-` 来**删除**它们。 clip 和 strip 都忽略块末尾实际有多少换行符; 在样式指示符后面添加一个 `+` 来**保留**它们。 - -例如,**strip 块文字样式** yaml 字符串是 - -```yaml -example: |- - Several lines of text, - with some "quotes" of various 'types', - and also a blank line: - - plus another line at the end. - - -``` - -结果为: - -```plain -Several lines of text, -with some "quotes" of various 'types', -and also a blank line: - -plus another line at the end. -``` - -更多信息可见 [Yaml Multiline String](https://yaml-multiline.info/) 和 [YAML Specification v1.2](https://yaml.org/spec/1.2.1/) 。 - -##### 原始字符串 - -KCL 原始字符串是通过在字符串字面值前加上 `'r'` 或 `'R'` 来创建的。 KCL 原始字符串将反斜杠 (`\`) 和字符串插值 (`${}`) 视为普通的非字符。当我们想要一个包含反斜杠、字符串插值的字符串并且不希望它们被视为转义字符时,原始字符串是很有用的。 - -- 对于包含反斜杠(`\`)的原始字符串,KCL 代码和输出 YAML 如下: - -```python -s = "Hi\nHello" -raw_s = r"Hi\nHello" # This is a KCL raw string with the `r` prefix. -``` - -```yaml -s: |- - Hi - Hello -raw_s: Hi\nHello -``` - -- 对于包含字符串插值(`${}`)的原始字符串,KCL 代码和输出 YAML 如下: - -```python -worldString = "world" -s = "Hello ${worldString}" -raw_s = r"Hello ${worldString}" # This is a KCL raw string with the `r` prefix. 
-``` - -```yaml -worldString: world -s: Hello world -raw_s: Hello ${worldString} -``` - -此外,原始字符串最常用的场景是在正则表达式中使用: - -```python -import regex - -key = "key" -result = regex.match(key, r"[A-Za-z0-9_.-]*") # True -``` - -#### 布尔值 - -布尔值有两个常量对象:`False` 和 `True`. - -```python -a = True -b = False -``` - -#### List - -List 是一个序列,通常用于存储同质项的集合。下面是一个简单的 KCL 列表的例子: - -```python -list = [1, 2, 3] -assert len(list) == 3 # True -assert list[0] == 1 # True -``` - -我们可以使用列表推导式构建列表: - -```python -list = [ _x for _x in range(20) if _x % 2 == 0] -assert list == [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] # True -``` - -并且还可以使用嵌套的列表推导式: - -```python -matrix = [[1, 2], [3,4], [5,6], [7,8]] -transpose = [[row[_i] for row in matrix] for _i in range(2)] -assert transpose == [[1, 3, 5, 7], [2, 4, 6, 8]] # True -``` - -此外,我们可以在列表推导式中使用两个变量。第一个变量表示列表中的索引,第二个变量表示列表中的项。 - -```python -data = [1000, 2000, 3000] -# Single variable loop -dataLoop1 = [i * 2 for i in data] # [2000, 4000, 6000] -dataLoop2 = [i for i in data if i == 2000] # [2000] -dataLoop3 = [i if i > 2 else i + 1 for i in data] # [1000, 2000, 3000] -# Double variable loop -dataLoop4 = [i + v for i, v in data] # [1000, 2001, 3002] -dataLoop5 = [v for i, v in data if v == 2000] # [2000] -# Use `_` to ignore loop variables -dataLoop6 = [v if v > 2000 else v + i for i, v in data] # [1000, 2001, 3000] -dataLoop7 = [i for i, _ in data] # [0, 1, 2] -dataLoop8 = [v for _, v in data if v == 2000] # [2000] -``` - -我们可以通过 `+` 连接列表: - -```python -_list0 = [1, 2, 3] -_list1 = [4, 5, 6] -joined_list = _list0 + _list1 # [1, 2, 3, 4, 5, 6] -``` - -我们可以使用解包操作符 `*` 合并多个列表: - -```python -_list0 = [1, 2, 3] -_list1 = [4, 5, 6] -union_list = [*_list0, *_list1] # [1, 2, 3, 4, 5, 6] -``` - -我们可以使用 `if` 表达式动态的将元素添加到列表,符合条件的元素会被添加到列表,不符合条件的元素会被忽略。 - -```python -a = 1 # 1 -data = [ - 1 - if a == 1: 2 - if a > 0: 3 - if a < 0: 4 -] # [1, 2, 3] -``` - -```python -a = 1 # 1 -data1 = [ - 1 - if a == 1: - 2 - elif a == 2: - 3 - else: - 3 -] # [1, 2] -data2 = [ - 1 - if a == 1: 2 - elif a == 2: 2 - else: 3 -] # [1, 2] -``` - -我们可以合并(union)列表: - -```python -_list0 = [1, 2, 3] -_list1 = [4, 5, 6] -union_list = _list0 | _list1 # [4, 5, 6] -``` - -我们可以使用 `for k in list_var` 表达式遍历列表: - -```python -data = [1, 2, 3] -dataAnother = [val * 2 for val in data] # [2, 4, 6] -``` - -#### Dict - -Dict 是将可哈希的值映射到任意对象的映射对象。字典是有序的。键的顺序遵循其声明的顺序: - -这里有几个简单的 KCL 字典: - -```python -a = {"one" = 1, "two" = 2, "three" = 3} -b = {'one' = 1, 'two' = 2, 'three' = 3} -assert a == b # True -assert len(a) == 3 # True -``` - -在写多行的键-值时,可以省略每个键-值对行尾的逗号 `,`: - -```python -data = { - "key1" = "value1" # Ignore the comma ',' at the end of line - "key2" = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -在 Dict 键上使用简单的字面值时可以省略引号: - -```python -data = { - key1 = "value1" # Ignore key quotation '"' - key2 = "value2" -} # {"key1": "value1", "key2": "value2"} -``` - -此外,**选择表达式**可以用于定义包含嵌套键 dict 实例。 - -```python -person = { - base.count = 2 - base.value = "value" - labels.key = "value" -} # {"base": {"count": 2, "value": "value"}, "labels": {"key": "value"}} -``` - -输出的 YAML 为: - -```yaml -person: - base: - count: 2 - value: value - labels: - key: value -``` - -我们可以使用字典推导式构建字典: - -```python -x = {str(i): 2 * i for i in range(3)} -assert x == {"0" = 0, "1" = 2, "2" = 4} -``` - -此外,我们可以在字典推导式中使用两个变量。第一个变量表示字典的键,第二个变量表示字典中键对应的值。 - -```python -data = {key1 = "value1", key2 = "value2"} -# Single variable loop -dataKeys1 = {k: k for k in data} # {"key1": "key1", "key2": "key2"} -dataValues1 = {k: data[k] for k in data} # {"key1": 
"value1", "key2": "value2"} -# Double variable loop -dataKeys2 = {k: k for k, v in data} # {"key1": "key1", "key2": "key2"} -dataValues2 = {v: v for k, v in data} # {"value1": "value1", "value2": "value2"} -dataFilter = {k: v for k, v in data if k == "key1" and v == "value1"} # {"key1": "value1"} -# Use `_` to ignore loop variables -dataKeys3 = {k: k for k, _ in data} # {"key1": "key1", "key2": "key2"} -dataValues3 = {v: v for _, v in data} # {"value1": "value1", "value2": "value2"} -``` - -我们可以使用解包操作符 `**` 来合并字典: - -```python -_part1 = { - a = "b" -} - -_part2 = { - c = "d" -} - -a_dict = {**_part1, **_part2} # {"a: "b", "c": "d"} -``` - -此外,union 操作符 `|` 也能达到同样的效果: - -```python -_part1 = { - a = "b" -} - -_part2 = { - c = "d" -} - -a_dict = _part1 | _part2 # {"a: "b", "c": "d"} -``` - -我们可以使用 `if` 表达式动态的将元素添加到字典,符合条件的元素会被添加到字典,不符合条件的元素会被忽略。 - -```python -a = 1 # 1 -data = { - key1 = "value1" - if a == 1: key2 = "value2" - if a > 0: key3 = "value3" - if a < 0: key4 = "value4" -} # {"key1": "value1", "key2": "value2", "key3": "value3"} -``` - -```python -a = 1 # 1 -data1 = { - key1 = "value1" - if a == 1: - key2 = "value2" - elif a > 0: - key3 = "value3" - else: - key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -data2 = { - key1 = "value1" - if a == 1: key2 = "value2" - elif a > 0: key3 = "value3" - else: key4 = "value4" -} # {"key1": "value1", "key2": "value2"} -``` - -我们可以使用 `for k in dict_var` 表达式来遍历字典, 并且可以使用 `in` 操作符来判断 dict 是否包含某个键。 - -```python -data = {key1 = "value1", key2 = "value2"} -dataAnother = {k: data[k] + "suffix" for k in data} # {"key1": "value1suffix", "key2": "value2suffix"} -containsKey1 = "key1" in data # True -containsKey2 = "key" in data # False -``` - -#### None - -在 KCL 中, `None` 表示对象的值为空, 这与 Go 中的 `nil` 和 Java 中的 `null` 一样,并且对应于 YAML 中的 `null`。 - -```python -a = None -b = [1, 2, None] -c = {key1 = "value1", key2 = None} -``` - -输出如下: - -```yaml -a: null -b: -- 1 -- 2 -- null -c: - key1: value1 - key2: null -``` - -请注意,`None` 不能参与四则运算,但它可以参与逻辑运算和比较运算。 - -```python -a = 1 + None # error -b = int(None) # error -c = not None # True -d = None == None # True -e = None or 1 # 1 -f = str(None) # None -``` - -#### Undefined - -`Undefined` 与 `None` 类似,但其语义是变量没有分配任何值,也不会输出到 YAML。 - -```python -a = Undefined -b = [1, 2, Undefined] -c = {key1 = "value1", key2 = Undefined} -``` - -输出如下: - -```yaml -b: -- 1 -- 2 -c: - key1: value1 -``` - -请注意,`Undefined` 不能参与四则运算,但它可以参与逻辑运算和比较运算。 - -```python -a = 1 + Undefined # error -b = int(Undefined) # error -c = not Undefined # True -d = Undefined == Undefined # True -e = Undefined or 1 # 1 -f = str(Undefined) # Undefined -``` - -### 运算符 - -以下字符表示运算符: - -``` - + - * ** / // % - << >> & | ^ < > - ~ <= >= == != @ \ -``` - -#### 算数运算符 - -KCL 支持常见的算数运算符: - -```python -assert 2 + 3 == 5 -assert 2 - 3 == -1 -assert 2 * 3 == 6 -assert 5 / 2 == 2.5 -assert 5 // 2 == 2 -assert 5 % 2 == 1 -``` - -#### 相等和关系运算符 - -KCL 支持相等和关系运算符: - -```python -assert 2 == 2 -assert 2 != 3 -assert 3 > 2 -assert 2 < 3 -assert 3 >= 3 -assert 2 <= 3 -``` - -#### 逻辑运算符 - -我们可以使用逻辑运算符反转或组合布尔表达式,例如:`and` 和 `or`: - -```python -if not done and (col == 0 or col == 3): - # ...Do something... 
- -``` - -#### 位运算符和移位运算符 - -以下是位运算符和移位运算符的例子: - -```python -value = 0x22 -bitmask = 0x0f - -assert (value & bitmask) == 0x02 -assert (value & ~bitmask) == 0x20 -assert (value | bitmask) == 0x2f -assert (value ^ bitmask) == 0x2d -assert (value << 4) == 0x220 -assert (value >> 4) == 0x02 -``` - -`|` 运算符可用于位运算,合并基本类型和集合及结构化数据,例如**列表**、**字典**和 **schema**。 - -位运算示例: - -```python -0x12345678 | 0xFF # 0x123456FF -``` - -联合基本类型示例: - -```python -schema x: - a: int | str # attribute a could be a int or string -``` - -#### 赋值运算符 - -以下 token 作为语法中的分隔符: - -``` - ( ) [ ] { } - , : . ; = -> - += -= *= /= //= %= - &= ^= >>= <<= **= -``` - -以下是使用赋值和参数赋值赋值运算符的例子: - -```python -_a = 2 -_a *= 3 -_a += 1 -assert _a == 7 -``` - -#### Identity 运算符 - -以下关键字作为语法中的 identity 运算符: - -```python -is, is not -``` - -Identity 运算符检查右侧和左侧是否时同一对象。它们通常用于检查某个变量是否是 `None/Undefined/True/False`。以下是一些例子: - -```python -empty_String = "" -empty_String is not None # True -``` - -#### 成员运算符 - -以下关键字作为语法中的成员运算符: - -```python -in, not in -``` - -- `in` 运算符计算了第一个操作数是否是第二个操作数的成员,第二个运算符必须是 list、dict、schema 或 string。 -- `not in` 运算符与 `in` 相反。它们都返回一个布尔值。 - -成员的含义因第二个操作数的类型而异:列表的成员是其元素;字典的成员是其键;字符串的成员是其所有子字符串。 - -```python -1 in [1, 2, 3] # True - -d = {one = 1, two = 2} -"one" in d # True -"three" in d # False -1 in d # False -[] in d # False - -"nasty" in "dynasty" # True -"a" in "banana" # True -"f" not in "way" # True - -d = Data {one = 1, two = 2} # Data is a schema with attributes one and two -"one" in d # True -"three" in d # False -``` - -#### 推导式 - -一个推导表达式通过遍历一个或多个迭代项并计算表达式生成的结果来生成连续的元素,并以此构造新的列表或字典。 - -我们可以如下使用列表和字典的推导表达式: - -```python -listVar = [_x for _x in range(20) if _x % 2 == 0] # list comprehension -dictVar = {str(_i): 2*_i for _i in range(3)} # dict comprehension -``` - -#### 其他运算符 - -- 使用 **()** 表示函数调用, 例如 `"{} {}".format("hello", world)`。 -- 使用 **[]** 引用列表中指定索引处的值。 -- 使用 **:** 定义类型注解。 -- 使用 **.** 引用成员字段。 -- 使用 **\\** 续行符编写长表达式。 - -```python -longString = "Too long expression " + \ - "Too long expression " + \ - "Too long expression " -``` - -### 表达式 - -#### 条件表达式 - -条件表达式的形式为 `a if cond else b`。它首先计算条件 `cond`。如果为真,则会计算 `a` 并生成它的值;否则,它会生成 `b` 的值。 - -示例: - -```python -x = True if enabled else False # If enabled is True, x is True, otherwise x is False -``` - -#### 索引表达式 - -索引表达式 `a[i]` 生成可索引类型的第 `i` 个元素,例如字符串或数组。索引 `i` 必须是 `-n` ≤ `i` < `n` 范围内的 `int` 值,其中 `n` 等于 `len(a)`。其他任何索引都会导致错误。 - -有效的负索引的行为类似于 `n+i`,允许方便的对序列末尾进行索引。 - -```python -val = "abc" -list = ["zero", "one", "two"] -str_0 = val[0] # "a" -str_1 = val[1] # "b" -str_n1 = val[-1] # "c" - -list_0 = list[0] # "zero" -list_1 = list[1] # "one" -list_n1 = list[-1] # "two" -``` - -索引表达式 `d[key]` 也可以用于字典 `d`,以获取指定键对应的值。如果字典中不包含这个键则会返回 `Undefined` - -出现在赋值符左侧的索引表达式会更新指定的列表或字典元素。 - -```python -d = {key1 = "value1", key2 = "value2"} -key1value = d["key1"] # value1 -key2value = d["key2"] # value2 -``` - -尝试更新不可变类型的元素值(如列表或字符串)或可变类型的不可变变量会产生错误。 - -#### 切片表达式 - -切片表达式 `a[start:stop:step]` 会生成 `a` 包含的一个子序列,其中 `a` 必须是字符串或者数组。 - -`start`、`stop` 和 `step` 三个操作数都是可选的。如果有的话,每个值都必须为整数。`step` 的默认值为 1。如果 `step` 未指定,它前面的冒号也可以省略。指定 `step` 为 0 会产生错误。 - -从概念上来说,这些操作数指定了一系列值,索引 `i` 从 `start` 开始,每次增加 `step` 直到 `i` 到达或超过 `stop`。结果由有效的 `i` 的 `a[i]` 组成。 - -如下所示,从三个操作数计算有效的开始和结束的索引。`n` 是序列的长度。 - -```python -val = "abc" -len = len(val) -a = val[1:len] # "bc" (remove first element) -b = val[0:-1] # "ab" (remove last element) -c = val[1:-1] # "b" (remove first and last element) -``` - -```python -"abc"[1:] # "bc" (remove first element) -"abc"[:-1] # "ab" (remove last element) -"abc"[1:-1] # "b" 
(remove first and last element) -"banana"[1::2] # "aaa" (select alternate elements starting at index 1) -"banana"[4::-2] # "nnb" (select alternate elements in reverse, starting at index 4) -``` - -KCL 禁止将切片表达式定义为左值。原因是列表和字符串是不可变的,重新切片可以直接操作操作数,以确保更好的性能。 - -#### 函数调用 - -KCL 允许调用内置函数,或者调用内置和系统模块中的函数。 - -调用函数的基本方法如下所示: - -```python -import math - -a = math.pow(2, 3) # 2 powers 3 is 8. -b = len([1, 2, 3]) # the length of [1, 2, 3] is 3 -``` - -参数以 `,` 分隔,并且 KCL 还支持位置参数和键-值对形式的参数。 - -```python -print("hello world", end="") -``` - -请注意: - -- 有些函数参数具有默认值。 -- 一些函数接受可变参数。 - -如果没有为没有默认值的参数提供参数,则会抛出错误。 - -#### 选择表达式 - -选择表达式选择值的属性或方法。KCL 提供了许多识别或过滤属性的方法: - -`x.y` - -- dict: 表示字典 `x` 中键 `y` 对应的值。 -- schema: 表示 schema `x` 中 `y` 属性的值。 -- package: 表示 package `x` 中 `y` 标示的标识符。 - -示例: - -```python -schema Person: - name: str - age: int - -person = Person { - name = "Alice" - age = 18 -} -name = person.name # "Alice" -age = person.age # 18 - -myDict = { - key = "value" -} -result = myDict.key # "value" -``` - -`x?.y` - -`x` 可以是 schema 实例或 dict。当 `x` 可能为 `None` 或者键 `y` 不在 `x` 中时这非常有用。 - -```python -# Example of dict: -data = {"key" = "value"} -a = data?.key # "value" -b = data?.name # Undefined - -# example of schema instance: -schema Company: - name: str - address: str - -schema Person: - name: str - job?: Company - -alice = Person { - name = "alice" -} - -if alice?.job?.name == "Group": - print("work in Group") -``` - -#### Quantifier 表达式 - -Quantifier 表达式用于集合:列表或字典。通常用于在处理集合后获得某个结果,主要有以下四种形式: - -- **all** - - 用于检测集合中所有元素都满足给定的逻辑表达式,并且返回一个布尔值作为结果。 - - 只有集合中所有元素都满足表达式为 true 时,`all` 表达式为 true,否则为 false。 - - 如果集合为空,返回 true。 - - 支持表达式执行期间逻辑表达式的短路。 -- **any** - - 用于检测集合中至少一个元素都满足给定的逻辑表达式,并且返回一个布尔值作为结果。 - - 当集合中至少一个元素都满足表达式为 true 时,`any` 表达式为 true,否则 false。 - - 如果集合为空,返回 false。 - - 支持表达式执行期间逻辑表达式的短路。 -- **map** - - 映射集合中的元素生成新的列表。 - - 新列表的长度严格等于原列表的长度。 -- **filter** - - 通过逻辑判断筛选原集合中的元素,返回一个经过筛选的子集合。 - - 当表达式为 true 时才将元素添加到子集合。 - - 产生的新集合的类型(list, dict 和 schema)与原集合的类型完全一致,并且长度为 `[0, len(original-collection)]`。 - -**all** 和 **any** 表达式的示例代码: - -```python -schema Config: - volumes: [{str:}] - services: [{str:}] - - check: - all service in services { - service.clusterIP == "NONE" if service.type == "ClusterIP" - }, "invalid cluster ip" - - any volume in volumes { - volume.mountPath in ["/home/admin", "/home/myapp"] - } -``` - -**map** 和 **filter** 表达式的示例代码: - -```python -a = map e in [{name = "1", value = 1}, {name = "2", value = 2}] { - {name = e.name, value = int(e.value) ** 2} -} # [{"name": "1", value: 1}, {"name": "2", "value": 4}] - -b = map k, v in {a = "foo", b = "bar"} { v } # ["foo", "bar"] - -c = filter e in [{name = "1", value = 1}, {name = "2", value = 2}] { - int(e.value) > 1 -} # [{"name": "2", "value": 2}] - -d = filter _, v in {a = "foo", b = "bar"} { - v == "foo" -} # {"a": "foo"} -``` - -请注意,区分 any 表达式和 any 类型的区别。当 `any` 在类型注解中使用,意味着变量的值是任意的,而 any 表达式意味着集合中的至少一个元素满足条件。 - -### 流程控制表达式 - -#### If 和 Else - -KCL 支持 `if` 表达式和可选的 `elif` 和 `else` 表达式, 示例如下: - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - print("maybe a is negative") -else: - print("a >= 100") -``` - -`elif` 的例子: - -```python -_result = 0 -if condition == "one": - _result = 1 -elif condition == "two": - _result = 2 -elif condition == "three": - _result = 3 -else: - _result = 4 -``` - -`if-elif-else` 表达式可以嵌套,示例如下: - -```python -a = 10 -if a == 0: - print("a is zero") -elif a < 100: - print("a < 100") - if a < 0: - print("a is negative") - print("No matter a is negative or positive, this message is 
printed") -else: - print("a >= 100") -``` - -此外,对于简单的 `if` 表达式如下: - -```python -if success: - _result = "success" -else: - _result = "failed" -``` - -我们可以使用 ` if else ` 的形式将它们写在一行: - -```python -_result = "success" if success else "failed" -``` - -`if` 或 `elif` 语句计算一个给定的表达式。当表达式的计算结果为 `True`, `:` 之后的语句将被计算,而当表达式为 `False` ,后面的语句不会被计算。 - -请注意,常量 `False`, `None`, 数字 `0`, 空列表 `[]`, 空字典 `{}` 和空字符串 `""` 都被视为 `False` 。 - -```python -_emptyStr = "" -_emptyList = [] -_emptyDict = {} -isEmptyStr = False if _emptyStr else True -isEmptyList = False if _emptyList else True -isEmptyDict = False if _emptyDict else True -``` - -结果为: - -```yaml -isEmptyStr: true -isEmptyList: true -isEmptyDict: true -``` - -### 断言语句 - -当发生错误时,开发人员应该能够检测到错误并终止执行。因此,KCL 引入了 `assert` 语法,示例如下: - -```python -a = 1 -b = 3 -# a != b evaluates to True, therefore no error should happen. -assert a != b -# a == b is False, in the reported error message, the message "SOS" should be printed. -assert a == b, "SOS" -``` - -此外,我们可以为 assert 语声明一个条件,当条件满足时,才进行 assert 断言 - -- 使用 if 语句书写条件断言 - -```python -a = None -if a: - assert a > 2: -``` - -- 使用 if 表达式书写条件断言 - -```python -a = None -assert a > 2 if a -``` - -### 函数 - -KCL 支持使用 lambda 关键字定义一个函数 - -```python -func = lambda x: int, y: int -> int { - x + y -} -a = func(1, 1) # 2 -``` - -lambda 函数具有如下特性: - -- lambda 函数将最后一个表达式的值作为函数的返回值,空函数体返回 None -- 返回值类型注解可以省略,返回值类型为最后一个表达式值的类型 -- 函数体中没有与顺序无关的特性,所有的表达式都是按顺序执行的 - -```python -_func = lambda x: int, y: int -> int { - x + y -} # Define a function using the lambda expression -_func = lambda x: int, y: int -> int { - x - y -} # Ok -_func = lambda x: int, y: int -> str { - str(x + y) -} # Error (int, int) -> str can't be assigned to (int, int) -> int -``` - -lambda 函数对象不能参与任何计算,只能在赋值语句和调用语句中使用。 - -```python -func = lambda x: int, y: int -> int { - x + y -} -x = func + 1 # Error: unsupported operand type(s) for +: 'function' and 'int(1)' -``` - -lambda 函数支持捕获其外部作用域的变量,并且可以作为其他函数的参数进行传递 - -```python -a = 1 -func = lambda x: int { - x + a -} -funcOther = lambda f, para: int { - f(para) -} -r0 = funcOther(func, 1) # 2 -r1 = funcOther(lambda x: int { - x + a -}, 1) # 2 -``` - -输出为: - -```yaml -a: 1 -r: 2 -``` - -此外,可以定义一个匿名函数并直接调用。 - -```python -result = (lambda x, y { - z = 2 * x - z + y -})(1, 1) # 3 -``` - -并且还可以在 for 循环使用使用匿名函数 - -```python -result = [(lambda x, y { - x + y -})(x, y) for x in [1, 2] for y in [1, 2]] # [2, 3, 3, 4] -``` - -请注意,KCL 中定义的函数的均为纯函数: - -- 函数的返回结果只依赖于它的参数。 -- 函数执行过程里面没有副作用。 - -因此,KCL 函数不能修改外部的变量,只能引用外部的变量,比如如下代码会发生错误: - -```python -globalVar = 1 -func = lambda { - x = globalVar # Ok - globalVar = 1 # Error -} -``` - -### 类型系统 - -#### 类型注解 - -类型注解可用于包级变量,schema 属性和参数。 - -- 属性可以是基本类型,例如字符串(`string`),浮点数(`float`),定点数(`int`) 或布尔值(`bool`)。 -- 属性可以是字面值类型,例如字符串文本(`"TCP"` 和 `"UDP"`),数字文本 (`"1"` 和 `"1.2"`),布尔值文本(`True` 和 `False`)。 -- 属性也可以是列表或字典: - - 未指定元素类型的列表为 `[]`。 - - 元素类型为 `t` 的列表为 `[t]`。这里 `t` 是另一种类型。 - - 键的类型为 `kt` 且值的类型为 `vt` 的字典为 `{kt:vt}`。 - - `kt`, `vt` 或两者都可以为空, 就像列表未指定元素类型一样。 -- 属性可以是由 `|` 定义的 **联合类型** ,例如 `a | b`, 意为类型可以是 a 或 b。 - - 联合类型可以包含 `int`, `str`, `float`, `bool`, `list`, `dict`, 字面值类型和 schema 类型,并且支持类型的嵌套,例如: `{str:str|int}`、`[[int|str]|str|float]` 和 `2 | 4 | 6` 等。 -- 属性可以是 schema 类型。在这种情况下,使用包名 + schema 名称作为类型名。 -- 属性可以声明为任意类型,例如 `any`。 - -示例 - -- 基本类型 - -```python -"""Top level variable type annotation""" -a: int = 1 # Declare a variable `a` that has the type `int` and the value `1` -b: str = "s" # Declare a variable `b` that has the type `str` and the value `"s"` -c: float = 1.0 # Declare a variable `c` 
that has the type `float` and the value `1.0` -d: bool = True # Declare a variable `d` that has the type `bool` and the value `True` -``` - -- List/Dict/Schema 类型 - -```python -schema Person: - name: str = "Alice" - age: int = 10 - -a: [int] = [1, 2, 3] # Declare a variable `a` that has the list type `[int]` and the value `[1, 2, 3]` -b: {str:str} = {k1 = "v1", k2 = "v2"} # Declare a variable `b` that has the dict type `{str:str}` and the value `{k1 = "v1", k2 = "v2"}` -c: Person = Person {} # Declare a variable `c` that has the schema type `Person` and the value `Person {}` -``` - -- 联合类型 - -```python -# Basic union types -schema x[argc: int]: # Schema argument type annotation - p: int | str # Schema attribute type annotation -``` - -```python -# Literal union types -schema LiteralType: - # String literal union types, x_01 can be one of "TCP" and "UDP" - x_01: "TCP" | "UDP" - # Number literal union types, x_02 can be one of 2, 4, and 6 - x_02: 2 | 4 | 6 - # Unit union types, x_03 can be one of 1Gi, 2Gi and 4Gi - x_03: 1Gi | 2Gi | 4Gi - -x = LiteralType { - x_01 = "TCP" - x_02 = 2 - x_03 = 1Gi -} -``` - -当属性的值不符合联合类型定义时,编译器会抛出错误: - -```python -# Literal union types -schema LiteralType: - # String literal union types, x_01 can be one of "TCP" and "UDP" - x_01: "TCP" | "UDP" - -x = LiteralType { - x_01 = "HTTP" # Error: the type got is inconsistent with the type expected, expect str(TCP)|str(UDP), got str(HTTP) -} -``` - -- Any 类型 - -```python -# Any type -schema Config: - literalConf: any = 1 - dictConf: {str:any} = {key = "value"} - listConf: [any] = [1, "2", True] - -config = Config {} -``` - -请注意,一般在配置编写中不提倡使用 `float` 和 `any` 类型,因为它们都存在一定的不稳定因素,比如精度丢失,无法进行静态类型检查等。 - -此外在 KCL 中,不允许修改一个变量的类型。如果在重新分配值时不满足类型,将引发类型错误。 - -```python -_a = 1 # The type of `_a` is `int` -_a = "s" # Error: expect int, got str(s) -``` - -变量可以赋值给其上界类型,但不能赋值给它的特化类型。 - -`None` 和 `Undefined` 可以赋值给任何类型: - -- 任何类型都可以赋值给 `any` 类型, `None` 和 `Undefined` 可以赋值给 `any` 类型。 - -```python -a: int = None -b: str = Undefined -c: any = 1 -d: any = "s" -e: any = None -``` - -- `int` 类型可以赋值给 `float` 类型, `float` 类型不能赋值给 `int` 类型. - -```python -a: float = 1 -b: int = 1.0 # Error: expect int, got float(1.0) -``` - -- `int` 类型可以赋值给 `int|str` 类型, `int|str` 不能赋值给 `int` 类型. 
- -```python -a: int | str = 1 -b: int = 1 if a else "s" # Error: expect int, got int(1)|str(s) -``` - -请注意,在 KCL 中虽然提供了 any 类型,但是它仍然是静态类型,所有变量的类型在编译期间不可变。 - -#### 类型推导 - -如果顶层或 schema 中的变量或常量声明没有使用显式的类型注解,则会从初始值推断类型。 - -- 整形数值被推断为 `int`。 - -```python -a = 1 # The variable `a` has the type `int` -``` - -- 浮点数被推断为 `float`。 - -```python -a = 1.0 # The variable `a` has the type `float` -``` - -- 字符串被推断为 `str`。 - -```python -a = "s" # The variable `a` has the type `str` -``` - -- 布尔值被推断为 `bool`。 - -```python -a = True # The variable `a` has the type `bool` -b = False # The variable `b` has the type `bool` -``` - -- `None` 和 `Undefined` 被推断为 `any`。 - -```python -a = None # The variable `a` has the type `any` -b = Undefined # The variable `b` has the type `any` -``` - -- 列表的类型根据元素类型推断,并且是可变大小的。 - -```python -a = [1, 2, 3] # The variable `a` has the type `[int]` -b = [1, 2, True] # The variable `b` has the list union type `[int|bool]` -c = ["s", 1] # The variable `c` has the list union type `[int|str]` -``` - -请注意,空列表将被推导为 `[any]` 类型。 - -```python -a = [] # The variable `a` has the type `[any]` -``` - -- 字典的类型是根据元素的键和值推断的,并且是可变大小的。 - -```python -a = {key = "value"} # The variable `a` has the type `{str:str}` -b = {key = 1} # The variable `b` has the type `{str:int}` -c = {key1 = 1, key2 = "s"} # The variable `c` has the type `{str:int|str}` -``` - -请注意,空字典将被推导为 `{any:any}` 类型。 - -```python -a = {} # The variable `a` has the type `{any:any}` -``` - -- 携带运行时值的 if 条件表达式的类型将被静态推断为所有可能结果的联合类型。 - -```python -a: bool = True # The variable `a` has the type `bool` -b = 1 if a else "s" # The variable `b` has the type `int|str` -``` - -当变量被推导为某个类型时,它的类型不能再改变。 - -```python -_a = 1 -_a = "s" # Error: expect int, got str(1) -``` - -#### 类型别名 - -在 KCL 中,我们可以使用 `type` 关键字为所有类型声明一个类型别名简化复杂类型的书写。 - -```python -type Int = int -type String = str -type StringOrInt = String | Int -type IntList = [int] -type StringAnyDict = {str:} -``` - -我们可以从一个包中导入一个类型并为它定义一个别名。 - -```py -import pkg - -type Data = pkg.Data -``` - -此外,我们还可以使用类型别名和字面值联合类型充当近似枚举的效果。 - -```python -# A type alias of string literal union types -type Color = "Red" | "Yellow" | "Blue" - -schema Config: - color: Color = "Red" # The type of color is `"Red" | "Yellow" | "Blue"`, and it has an alias `Color`, whose default value is `"Red"` - -config = Config { - color = "Blue" -} -``` - -上述代码执行的输出结果为: - -```yaml -config: - color: Blue -``` - -请注意,类型别名不能与已有的内置类型 `any`、`int`、`float`、`bool` 和 `str` 等相同 - -```python -type any = int | str # Error -type int = str # Error -type float = int # Error -type bool = True # Error -type str = "A" | "B" | "C" # Error -``` - -#### 类型守卫 - -KCL 支持在程序中使用 `typeof` 函数对任意值求得其运行时的类型。 - -```python -import sub as pkg - -_a = 1 - -t1 = typeof(_a) -t2 = typeof("abc") - -schema Person: - name?: any - -_x1 = Person {} -t3 = typeof(_x1) - -_x2 = pkg.Person {} -t4 = typeof(_x2) -t5 = typeof(_x2, full_name=True) - -t6 = typeof(_x1, full_name=True) - -# Output -# t1: int -# t2: str -# t3: Person -# t4: Person -# t5: sub.Person -# t6: __main__.Person -``` - -除此之外,我们可以使用 `as` 关键字在运行时作类型转换。`as` 关键字的一般用法如下: - -- 具有偏序关系的基础类型,比如 `float -> int` -- 具有偏序关系的联合类型,比如 `int | str -> str` -- 对类型上界 `any` 的转换,比如 `any -> int` -- 具有偏序关系的结构类型,比如 `base-schema -> sub-schema` - -```python -schema Data1: - id?: int - -schema Data2: - name?: str - -data: Data1 | Data2 = Data1 {} - -if typeof(a) == "Data1": - data1 = data as Data1 # The type of `data1` is `Data1` -elif typeof(a) == "Data2": - data2 = data as Data2 # The type of `data2` is `Data2` -``` - 
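The `base-schema -> sub-schema` case listed above is not illustrated; a minimal sketch (the `Base`/`Sub` schemas here are hypothetical, not from the original docs) could look like this:

```python
schema Base:
    name?: str

schema Sub(Base):
    age?: int

# Assign a sub-schema instance to a variable typed with its base schema (upcast),
# then narrow it back with `as` (conversion between schemas with a partial-order relation).
_v: Base = Sub {name = "Alice", age = 18}
sub = _v as Sub    # The static type of `sub` is `Sub`
```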
-当类型转换失败时,一个运行时错误将被抛出。 - -```python -a: any = "s" -b: int = a as int # Error: The `str` type cannot be converted to the `int` type -``` - -如果不想要运行时类型转换失败,我们可以添加 `if` 防御式代码进行检查。 - -```python -a: any = "s" -b = a as int if typeof(a) == "int" else None # The type of b is `int` -``` - -请注意,`as` 转换的目标类型不能是字面值类型或者联合类型,因为它们在运行时不具有一个完全确定的类型。 - -### Schema - -#### 概述 - -Schema 是定义复杂配置的语言元素。我们可以定义带类型的属性,初始值和验证规则。此外,KCL 支持 schema 单继承、mixin 和 protocol 实现复杂配置的复用。 - -#### 基础部分 - -##### 属性 - -以下是 schema 基础定义的示例: - -```python -# A person has a first name, a last name and an age. -schema Person: - firstName: str - lastName: str - # The default value of age is 0 - age: int = 0 -``` - -在 KCL 中, 我们可以使用类型注解在 schema 中定义一些属性,每个属性都可以设置一个可选的默认值(比如上述代码中的 `age` 属性,它的默认值是 `0`),没有设置默认值的属性的初始值为 `Undefined`, 它们不会在 YAML 当中进行输出。 - -###### 不可变性 - -schema 中属性的不可变性遵循和全局变量不可变性一样的规则,只有 schema 中的可变属性可以在 schema 中修改。此外,schema 的属性默认值可被 schema 配置值修改: - -```python -schema Person: - age: int = 1 # Immutable attribute - _name: str = "Alice" # Mutable attribute - - age = 10 # Error, can't change the default value of the attribute `age` in the schema context. - _name = "Bob" # Ok - -person = Person { - age = 3 # Ok, can change the default value of the attribute `age` in the schema config. -} -``` - -###### 可选属性 - -schema 实例中每个属性 **必须** 赋值一个非 `None`/`Undefined` 的值,否则编译器会抛出错误,除非它被 `?` 符号标记为可选属性。 - -示例: - -```python -schema Employee: - bankCard: int # bankCard is a required attribute, and it can NOT be None or Undefined - nationality?: str # nationality is an optional attribute, and it can be None or Undefined - -employee = Employee { - bankCard = None # Error, attribute 'bankCard' of Employee is required and can't be None or Undefined - nationality = None # Ok -} -``` - -##### 顺序无关计算 - -schema 中顺序无关计算表示 schema 内部属性之间的引用关系。例如,当我们声明一个形式为 `a = b + 1` 的表达式时,`a` 值的计算依赖于 `b` 值的计算。当编译器计算 `a` 的值并且 `a` 的值取决于 `b` 的值时,编译器会选择先计算 `b` 的值,然后根据 `b` 的值计算 a 的值表达式 `a = b + 1`,这与传统过程语言的计算方法略有不同。 - -由于 schema 中值的计算是基于依赖关系的,就像有向无环图按照拓扑排序的顺序遍历图中的每个节点一样, schema 中属性的声明顺序并不那么重要,因此特征称为顺序无关计算。 - -请注意,不同 schema 属性值之间不能有循环引用。 - -我们可以通过下面的例子看到这个特性。 - -```python -schema Fib: - n1: int = n - 1 # Refers to the attribute `n` declared after `n1` - n2: int = n1 - 1 - n: int - value: int = 1 if n <= 2 else Fib {n = n1}.value + Fib {n = n2}.value - -fib8 = Fib {n = 8}.value -``` - -结果为: - -```yaml -fib8: 21 -``` - -在 schema 中,我们只需要简单的指定属性之间的依赖关系,编译器就会根据依赖关系自动计算出值,这样可以帮助我们节省大量的样板代码,减少配置编写难度。 - -##### Schema 上下文 - -我们可以定义 schema 的上下文来管理 schema 的属性,可以直接在 schema 中编写 schema 参数、临时变量和表达式等: - -```python -schema Person[_name: str]: # define a schema argument - name: str = _name # define a schema attribute - age: int = 10 # define a schema attribute with default value - hands: [int] = [i for i in [1, 2, 3]] # define a for statement -``` - -然后,我们可以通过如下代码实例化一个 `Person` 并将其赋值给 `alice` 变量: - -```python -alice = Person("alice") -``` - -可以得到如下 YAML 输出: - -```yaml -alice: - name: alice - age: 10 - hands: - - 1 - - 2 - - 3 -``` - -##### 校验 - -KCL 中为了确保代码稳定性,除了使用 **静态类型** (类型注解) 和 **不可变性**,还支持在 **check** 块中定义验证规则 (KCL 几乎原生支持所有 [OpenAPI](https://www.openapis.org/) 的验证能力): - -```python -import regex - -schema Sample: - foo: str - bar: int - fooList: [str] - - check: - bar > 0 # Minimum, also support the exclusive case - bar < 100 # Maximum, also support the exclusive case - len(fooList) > 0 # Min length, also support exclusive case - len(fooList) < 100 # Max length, also support exclusive case - regex.match(foo, "^The.*Foo$") # Regex match - isunique(fooList) # Unique - bar in range(100) # Range - 
bar in [2, 4, 6, 8] # Enum - multiplyof(bar, 2) # MultipleOf -``` - -使用 schema, 所有的实例将在编译时验证: - -```python -# Ok -goodSample = Sample { - foo = "The Foo" - bar = 2 - fooList = ["foo0", "foo1"] -} - -# Error: validation failure: Check failed on check conditions: bar < 100. -badSample = Sample { - foo = "The Foo" - bar = 123 - fooList = ["foo0", "foo1"] -} -``` - -此外,我们可以使用 **and**, **or**, **if** 来构建更复杂的检查逻辑: - -```python -schema Sample: - bar: int - foo: str - doCheck: bool - - check: - regex.match(foo, "^The.*Foo$") and bar in [2, 4, 6, 8] if doCheck -``` - -为了确保所有检查规则都能很好地发挥其相应的作用,我们可以通过编写 KCL 测试用例来测试不同数据组合的合理性和正确性,并通过 kcl test tool 运行所有测试用例。 - -##### 文档 - -通常在我们写好 schema 模型之后,我们会为 schema 写文档注释,可以用一个三引号字符串来完成,如下所示: - -```python -schema Server: - """Server is the common user interface for long-running - services adopting the best practice of Kubernetes. - - Attributes - ---------- - workloadType : str, default is Deployment - Use this attribute to specify which kind of long-running service you want. - Valid values: Deployment, CafeDeployment. - See also: kusion_models/core/v1/workload_metadata.k. - name : str, default is None - A Server-level attribute. - The name of the long-running service. - See also: kusion_models/core/v1/metadata.k. - labels : {str:str}, optional, default is None - A Server-level attribute. - The labels of the long-running service. - See also: kusion_models/core/v1/metadata.k. - - Examples - ---------------------- - myCustomApp = AppConfiguration { - name = "componentName" - } - """ - workloadType: str = "Deployment" - name: str - labels?: {str:str} -``` - -更多详细内容可见 Doc tools。 - -##### 配置 - -假设我们有如下 schema 定义: - -```python -schema Person: - firstName: str - lastName: str - labels?: {str:str} -``` - -可以用类 JSON 的表达式定义配置: - -```python -person = Person { - firstName = "firstName" - lastName = "lastName" -} -``` - -schema 遵循严格的属性定义,配置未定义的属性将触发编译错误: - -```python -person = Person { - firstName = "firstName" - lastName = "lastName" - fullName = "fullName" # Error: Cannot add member 'fullName' to schema 'Person', 'fullName' is not defined in schema 'Person' -} -``` - -此外,我们可以使用 `if` 表达式将元素动态的添加到 schema 实例中,将满足条件的元素添加到 schema 实例并忽略不满足条件的元素。并且除了使用一个 schema 类型实例化一个 schema,我们也可以通过 schema 实例得到一个新的实例。 - -```python -env = "prod" -person = Person { - firstName = "firstName" - lastName = "lastName" - if env == "prod": - labels.env = env - else: - labels.env = "other" -} -# We can use the person instance to get a new instance named `personx` directly. 
-personx = person { - firstName = "NewFirstName" -} -``` - -输出为: - -```yaml -env: prod -person: - firstName: firstName - lastName: lastName - labels: - env: prod -personx: - firstName: NewFirstName - lastName: lastName - labels: - env: prod -``` - -#### 高级功能 - -##### Protocol & Mixin - -除了 schema, 在 KCL 中还提供了一种额外的类型定义方式 `protocol`,它的性质如下: - -- 在 protocol 中,只能定义属性及其类型,不能书写复杂的逻辑与 check 表达式,也不能使用 mixin。 -- protocol 只能对非 `_` 开头的属性进行约束。 -- protocol 只能继承自或者引用 protocol, 不能继承自或者引用 schema。 - -此外,我们可以使用可选的 **mixin** 组装复杂的 schema,并使用 **protocol** 为 **mixin** 添加可选的宿主类型, 使用 `for` 关键字为 **mixin** 定义宿主类型,并且在 mixin 内部它将从宿主类型中查询到属性的类型。 - -```python -schema Person: - mixin [FullNameMixin] - - firstName: str # Required - lastName: str # Required - fullName?: str # Optional -``` - -FullNameMixin 是一个产生 fullName 字段的简单例子: - -```python -protocol PersonProtocol: - firstName: str - lastName: str - fullName?: str - -mixin FullNameMixin for PersonProtocol: - fullName = "{} {}".format(firstName, lastName) -``` - -然后我们可以通过一下方式获取 schema 实例: - -```python -person = Person { - firstName = "John" - lastName = "Doe" -} -``` - -输出结果为: - -```yaml -person: - firstName: John - lastName: Doe - fullName: John Doe -``` - -请注意,宿主类型 **protocol** 只能用于 **mixin** 的定义 (后缀名为 `Mixin`), 否则将会报错。 - -```python -protocol DataProtocol: - data: str - -schema Data for DataProtocol: # Error: only schema mixin can inherit from protocol - x: str = data -``` - -##### 索引签名 - -在 KCL schema 中可以定义索引签名,这意味着索引签名的键-值约束可用于构造具有 schema 类型的字典。或者可以将其他检查添加到额外的 schema 属性中,以增强 KCL 类型和语义检查。 - -###### 基本用法 - -使用 `[{attr_alias}: {key_type}]: {value_type}` 的形式去定义 schema 的类型注解, 其中 `{attr_alias}` 可以省略。 - -```python -schema Map: - """ - Map is a schema with a key of str type and a value of str type - """ - [str]: str # `{attr_alias}` can be omitted. - -data = Map { - key1 = "value1" - key2 = "value2" -} -``` - -###### 同时定义属性和索引签名 - -可以在 schema 中同时定义 schema 属性和索引签名,通常用于表示 schema 中额外属性的类型约束,比如如下代码 - -```python -schema Person: - name: str - age: int - [...str]: str # Except for the `name` and `age` attributes, the key type of all other attributes of the schema must be `str`, and the value type must also be `str`. -``` - -###### 定义索引签名别名 - -可以为索引签名定义类型注解的属性别名,并将其与检查块一起使用。 - -```python -schema Data: - [dataName: str]: str - check: - dataName in ["Alice", "Bob", "John"] # We can use the index signature key name in the check block. - -data = Data { - Alice = "10" - Bob = "12" - Jonn = "8" # Error: Jonn not in ["Alice", "Bob", "John"] -} -``` - -##### 继承 - -类似于其他面向对象语言,KCL 提供了基础且有限的面向对象支持,例如 **属性复用**,**私有和公有变量**和**单继承**。KCL 不支持多继承。 - -以下是单继承的例子: - -```python -# A person has a first name, a last name and an age. -schema Person: - firstName: str - lastName: str - # The default value of age is 0 - age: int = 0 - -# An employee **is** a person, and has some additional information. 
-schema Employee(Person): - bankCard: int - nationality?: str - -employee = Employee { - firstName = "Bob" - lastName = "Green" - age = 18 - bankCard = 123456 -} -``` - -结果为: - -```yaml -employee: - firstName: Bob - lastName: Green - age: 18 - bankCard: 123456 - nationality: null -``` - -请注意,KCL 只支持 schema 的 **单继承**。 - -此外,当 schema 存在继承关系时,可选属性的性质如下: - -- 如果该属性在基类 schema 中是可选的,则它在子类 schema 中是可选的,也可以是子类 schema 中必选的。 -- 如果该属性在基类 schema 中是必选的,则它在子类 schema 中也是必选的。 - -```python -schema Person: - bankCard?: int - nationality: str - -schema Employee(Person): - bankCard: int # Valid, both `bankCard: int` and `bankCard?: int` are allowed - nationality?: str # Error, only `nationality: str` is allowed -``` - -##### Schema 函数 - -schema 映射到函数上非常好用;它可以有任意数量的输入和输出参数。 例如,Fibonacci 函数可以使用递归 schema 如下编写: - -```python -schema Fib[n: int]: - n1 = n - 1 - n2 = n - 2 - if n == 0: - value = 0 - elif n == 1: - value = 1 - else: - value = Fib(n1).value + Fib(n2).value - -fib8 = Fib(8).value # 21 -``` - -##### 装饰器 - -像 Python 一样, KCL 支持在 schema 上使用装饰器。KCL 装饰器动态地改变 schema 的功能,而不必直接使用子 schema 或更改被装饰的 schema 的源代码。 和函数调用一样,装饰器支持传入额外的参数。 - -内置的 schema 装饰器: - -- `@deprecated` - 标识 schema 或 schema 属性被废弃. `@deprecated` 装饰器支持三种参数: - - **version** - 字符串类型,表示版本信息。 默认值为空。 - - **reason** - 字符串类型,表示不推荐使用的原因。 默认值为空。 - - **strict** - bool 类型,表示是报错还是警告。 默认值是 true。 如果 `strict` 为 `True` 并且抛出错误,程序将被中断。 如果 `strict` 为 `False`,则会输出警告并且不会中断程序。 - -示例: - -```python -@deprecated -schema ObsoleteSchema: - attr: str - -schema Person: - firstName: str = "John" - lastName: str - @deprecated(version="1.16", reason="use firstName and lastName instead", strict=True) - name: str - attrs: ObsoleteSchema = { # Error: ObsoleteSchema was deprecated - attr = "value" - } - -JohnDoe = Person { # Error: name was deprecated since version 1.16, use firstName and lastName instead - name = "deprecated" -} -``` - -- `@info` - 给 schema 或 schema 属性标识额外的信息,支持任意参数,用于语言静态分析 schema 或 schema 属性的扩展标记信息 - -示例: - -```python -@info(version="v1") -schema Person: - @info(message="name") - name: str - age: int -``` - -请注意,当前版本的 KCL 尚不支持用户自己定义装饰器。 - -##### 成员函数 - -内置函数和 schema 成员 - -- instances() - 返回 schema 的现有实例列表。 - -```python -schema Person: - name: str - age: int - -alice = Person { - name = "Alice" - age = 18 -} - -bob = Person { - name = "Bob" - age = 10 -} - -aliceAndBob = Person.instances() # Person is a schema type, instances() is its member method -``` - -输出为: - -```yaml -alice: - name: Alice - age: 18 -bob: - name: Bob - age: 10 -aliceAndBob: -- name: Alice - age: 18 -- name: Bob - age: 10 -``` - -### 配置操作 - -#### 配置合并 - -##### | 运算符 - -在 KCL 中,我们可以使用合并运算符 `|` 来合并配置。union 运算符支持的类型包括如下: - -``` -SchemaInstance | SchemaInstance -SchemaInstance | Dict -Dict | Dict -List | List -``` - -合并集合和结构化数据: - -- 合并 List。使用 `|` 运算符右边的列表表达式按照**索引**逐一覆盖左边列表表达式中的元素。 - -```python -_a = [1, 2, 3] -_b = [4, 5, 6, 7] -x = _a | _b # [4, 5, 6, 7] 1 -> 4; 2 -> 5; 3 -> 6; Undefined -> 7 -``` - -合并特定索引或所有元素仍在讨论中。 - -- 合并 Dict. 
使用 `|` 运算符右边的列表表达式按照**键**逐一覆盖左边列表表达式中的元素。 - -```python -_a = {key1 = "value1"} -_b = {key1 = "overwrite", key2 = "value2"} -x = _a | _b # {"key1": "overwrite", "key2": "value2"} -``` - -集合和 schema 的合并是一个新的集合,其属性是将 b 合并到 a,保留从左到右的操作数顺序。 - -- 合并 schema。Schema 的合并与 dict 相似。 - -Schema 的合并操作如下: - -```python -schema Person: - firstName?: str - lastName?: str - -_a = Person { - firstName = "John" -} -_b = {lastName = "Doe"} -_c = _a | _b # {"firstName": "John", "lastName": "Doe"} -_d = _a | None # {"firstName": "John"} -_e = _a | Undefined # {"firstName": "John"} -_f = None | _a # {"firstName": "John"} -_g = Undefined | _a # {"firstName": "John"} -``` - -请注意,当 union 运算符的左右操作数之一为 None 时,将立即返回另一个操作数。 - -```python -data1 = {key = "value"} | None # {"key": "value"} -data2 = None | [1, 2, 3] # [1, 2, 3] -data3 = None | None # None -``` - -输出如下: - -```yaml -data1: - key: value -data2: -- 1 -- 2 -- 3 -data3: null -``` - -##### : 运算符 - -模式: `identifier : E` - -表达式 `E` 的值将被合并到元素值。 - -示例: - -```python -schema Data: - labels: {str:} = {key1 = "value1"} - -data = Data { - # union {key2: "value2"} into the attribute labels of the schema Data. - labels: {key2 = "value2"} -} -``` - -输出: - -```yaml -data: - labels: - key1: value1 - key2: value2 -``` - -除了在 schema 属性上使用属性运算符之外,还可以使用属性运算符对配置块执行不同的操作。 - -- schema 外部使用合并运算符 `:` - -```python -schema Data: - d1?: int - d2?: int - -schema Config: - data: Data - -# This is one configuration that will be merged. -config: Config { - data.d1 = 1 -} -# This is another configuration that will be merged. -config: Config { - data.d2 = 2 -} -``` - -与它等效的配置代码可以表示为: - -```python -schema Data: - d1?: int - d2?: int - -schema Config: - data: Data - -config: Config { - data.d1 = 1 - data.d2 = 1 -} -``` - -输出结果为: - -```yaml -config: - data: - d1: 1 - d2: 1 -``` - -- schema 内部使用合并运算符 `:` - -```python -schema Data: - d1?: int - d2?: int - -schema Config: - # This is one configuration that will be merged. - data: Data { - d1 = 1 - } - # This is another configuration that will be merged. - data: Data { - d2 = 1 - } - -config: Config {} -``` - -#### 配置覆盖 - -##### = 运算符 - -模式: `identifier = E` - -表达式 `E` 的值将覆盖元素值。 - -示例: - -```python -schema Data: - labels: {str:} = {key1 = "value1"} - -data = Data { - # override {key2: "value2"} into the attribute labels of the schema Data. - labels = {key2 = "value2"} -} -``` - -输出: - -```yaml -data: - labels: - key2: value2 -``` - -请注意,可以使用 `Undefined` 来覆盖,来“删除”内容。例如 `{ a = Undefined }`。 - -#### 配置添加 - -##### += 运算符 - -模式: `identifier += E` - -插入只能用于列表类型的 `identifier`. - -`E` 将插入到列表 `identifier` 指定索引后,并且索引以后的属性将自动后移。 - -示例: - -```python -schema Data: - labels: {str:} = {key1 = [0]} - -data = Data { - # insert [1] into the attribute labels.key1 of the schema Data. 
- labels: {key1 += [1]} -} -``` - -输出: - -```yaml -data: - labels: - key1: - - 0 - - 1 -``` - -如果没有定义索引,将使用最后一个索引。 - -#### 注意事项 - -合并运算符 `:` 是一个可交换的幂等运算符,当合并的值发生值的冲突时会发生值冲突错误,因此我们需要 `=` 和 `+=` 运算符表示配置的覆盖,添加和删除操作。 - -```python -data0 = {id: 1} | {id: 2} # Error:conflicting values between {'id': 2} and {'id': 1} -data1 = {id: 1} | {id = 2} # Ok, the value of `data` is {"id": 2} -``` - -`:` 运算符冲突检查的规则如下: - -- `None` 和 `Undefined` 不与任何值冲突 - -```python -data0 = None | {id: 1} # Ok -``` - -- 对于 `int`、`float`、`str` 和 `bool` 类型的变量,当它们的值不相同时发生冲突错误。 - -```python -data0 = 1 | 1 # Ok -data1 = 1 | "s" # Error -``` - -- 对于列表类型 - - 当它们的长度不相同时,它们被认为是冲突的 - - 当它们的长度相同时,当且仅当它们的任意一个子元素值冲突时,它们自身是冲突的 - -```python -data0 = [1] | [1] # Ok -data1 = [1, 2] | [1] # Error -``` - -- 对于 dict/schema 类型 - - 对于相同的 key,key 的值冲突时,它们自身是冲突的,否则是不冲突的 - -```python -data0 = {id: 1} | {id: 1} # Ok -data1 = {id: 1} | {id: 2} # Error -data1 = {id: 1} | {idAnother: 1} # Ok -``` - -### Rule - -KCL 支持使用 rule 关键字定义校验块,可用于数据校验,用法类似于 schema 中的 check 表达式。 - -```python -rule SomeRule: - age > 0, "rule check failure message" -``` - -可以像 schema 实例化那样调用一个 rule 进行校验 - -```python -age = 0 -name = "Bob" -rule SomeRule: - age > 0, "rule check failure message" - name == "Alice" - -rule1 = SomeRule() # Rule call -rule2 = SomeRule {} -``` - -可以使用 protocol 和 for 绑定语句为 rule 增加类型约束: - -```python -# Schema definition -protocol Service: - clusterIp: str - $type: str - -# Schema definition -protocol Volume: - mountPath: [str] - -# Protocol -protocol SomeProtocol: - id: int - env: {str: any} - services: [Service] - volumes: [Volume] - -rule SomeChecker for SomeProtocol: - id > 0, "id must >0" - - all service in services { - service.clusterIP == "NONE" if service.type == "ClusterIP" - } - - any volume in volumes { - volume.mountPath in ["/home/admin", "/home/myapp"] - } - -# Call rule to check with config parameter -SomeChecker { - id = 1 - env = { - MY_ENV = "MY_ENV_VALUE" - } - services = [ - { - type = "ClusterIP" - clusterIP = "NONE" - } - ] - volumes = [ - { - mountPath = "/home/admin" - } - { - mountPath = "/home/myapp" - } - ] -} -``` - -请注意,`protocol` 和 `rule` 的组合方式可以使属性和其约束定义进行分离,我们可以在不同的包中定义不同的 `rule` 和 `protocol` 按需进行组合,这与 schema 中的 check 表达式只能与 schema 属性定义在一起是不同的。 - -此外,有两种复用不同 rule 的方式 - -- 直接调用 - -```python -weather = "sunny" -day = "wednesday" - -rule IsSunny: - weather == "sunny" - -rule IsWednesday: - day == "wednesday" - -rule Main: - IsSunny() # Rule inline call - IsWednesday() # Rule inline call - -Main() # Rule call -``` - -使用 rule 的继承 (rule 不同于 schema, 可以多继承混用) - -```python -weather = "sunny" -day = "wednesday" - -rule IsSunny: - weather == "sunny" - -rule IsWednesday: - day == "wednesday" - -rule Main(IsSunny, IsWednesday): - id == 1 - -Main() -``` - -可以使用 option 函数与命令行 `-D` 参数获得外部数据进行校验 - -- 一个简单例子 - -```python -schema Day: - day: str - homework: str - -days: [Day] = option("days") - -rule Main: - filter d in days { - d.day not in ["saturday", "sunday"] and d.homework - } - -Main() -``` - -- 一个复杂例子 - -```python -data = option("data") -input = option("input") - -rule Allow: - UserIsAdmin() - any grant in UserIsGranted() { - input.action == grant.action and input.type == grant.type - } - -rule UserIsAdmin: - any user in data.user_roles[input.user] { - user == "admin" - } - -rule UserIsGranted: - [ - grant - for role in data.user_roles[input.user] - for grant in data.role_grants[role] - ] - -allow = Allow() or False -``` - -### 模块 - -KCL 配置文件以 **模块** 形式组织。 单个 KCL 文件被认为是一个 module,一个目录被认为是一个包。 - -同一个包内的模块是可见的,跨包引用需要通过导入可见。 - -``` -. 
-└── root - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ └── service1.k - └── mixin - └── mixin1.k -``` - -model1.k: - -```python -# schema CatalogItem in model1.k - -schema CatalogItem: - id: int - image: CatalogItemImage # CatalogItemImage is defined in the module of the same package e.g., model2.k in package model - title: str -``` - -service1.k: - -```python -import ..model as model # cross-package references - -schema ImageService: - image: model.CatalogItemImage # CatalogItemImage is imported from another package e.g., model2.k in package model - name: str -``` - -#### 相对路径引用 - -我们可以使用 `.` 运算符来实现 KCL 入口文件的相对路径导入。 - -main.k: - -```python -import .model1 # Current directory module -import ..service # Parent directory -import ...root # Parent of parent directory - -s = service.ImageService {} -m = root.Schema {} -``` - -#### 绝对路径引用 - -`import a.b.c.d` 的语义为: - -1. 从当前目录寻找路径 `./a/b/c/d`。 -2. 如果当前目录寻找失败,从根目录寻找 `ROOT_PATH/a/b/c/d` - -根路径 `ROOT_PATH` 的定义为: - -从当前目录或者父级目录中查找 `kcl.mod` 文件对应的目录。 - -``` -. -└── root - ├── kcl.mod - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ └── service1.k - └── mixin - └── mixin1.k -``` - -main.k: - -```python -import service # `root package` and `kcl.mod` are in the same directory -import mixin # `root package` and `kcl.mod` are in the same directory - -myModel = model.CatalogItem {} -``` - -请注意,对于 KCL 入口文件 `main.k`,不能导入所在文件夹,否则会出现递归导入错误: - -```python -import model # Error: recursively loading -``` - -### 动态参数 - -假设某些字段需要像用户输入一样动态传入,我们可以在模块中定义一个动态参数: - -```python -bankCard = option("bankCard") # Get bankCard through the option function. -``` - -我们可以如下使用 module: - -``` -kcl -DbankCard=123 employee.k -``` - -目前,支持顶级参数的类型有数字、字符串、布尔、列表和字典。 - -``` -kcl main.k -D list_key='[1,2,3]' -D dict_key='{"key":"value"}' -``` - -请注意,命令行中引号 `"` 等符号需要使用 `\` 进行转义 - -#### Setting 文件形式的参数 - -此外,它还支持输入一个 YAML 文件作为顶级参数。 - -```yaml -kcl_options: - - key: key_number - value: 1 - - key: key_dict - value: - innerDictKey: innerDictValue - - key: key_list - value: - - 1 - - 2 - - 3 - - key: bankCard - value: 123 -``` - -``` -kcl -Y setting.yaml employee.k -``` - -此外,setting 文件还支持配置命令行编译参数如下: - -```yaml -kcl_cli_configs: - files: - - file1.k - - file2.k - disable_none: true - strict_range_check: true - debug: 1 - verbose: 1 - output: ./stdout.golden -kcl_options: - - key: image - value: docker.io/kusion:latest -``` - -KCL CLI -Y 参数还支持多文件配置,并支持编译参数和顶级参数的单独写入与合并。 - -``` -kcl -Y compile_setting.yaml option_setting.yaml -``` - -- `compile_setting.yaml` - -```yaml -kcl_cli_configs: - files: - - file1.k - - file2.k - disable_none: true - strict_range_check: true - debug: 1 - verbose: 1 - output: ./stdout.golden -``` - -- `option_setting.yaml` - -```yaml -kcl_options: - - key: image - value: docker.io/kusion:latest -``` - -我们可以使用以下指令获取每个参数的含义 - -``` -kcl --help -``` - -#### Option Functions - -我们可以在 KCL 代码中使用 `option` 获取顶级参数。 - -```python -value = option(key="key", type='str', default="default_value", required=True, help="Set key value") -``` - -参数 - -- **key**: 参数的键。 -- **type**: 要转换的参数类型。 -- **default**: 参数默认值。 -- **required**: 当未提供参数且参数的 required 为 True 是报告错误。 -- **help**: 帮助信息。 - -### 多文件编译 - -除了上面的 KCL 单文件执行之外,我们还可以使用以下命令同时编译多个 KCL 入口文件: - -``` -kcl main_1.k main_2.k ... 
main_n.k -``` - -main_1.k - -```python -a = 1 -b = 2 -``` - -main_2.k - -```python -c = 3 -d = 4 -``` - -输出结果为: - -```yaml -a: 1 -b: 2 -c: 3 -d: 4 -``` - -利用**多文件编译**,我们可以组合多个 KCL 文件,而无需使用 import 管理文件。 我们来看一个结合**多文件编译**和 **schema 实例**的例子。 - -model.k - -```python -schema Model: - name: str - labels?: {str:} - annotations?: {str:} - replicas: int - -_model1 = Model { - name = "model1" - labels.key1 = "value1" - labels.key2 = "value2" - annotations.key = "value" - replicas = 2 -} - -_model2 = Model { - name = "model2" - replicas = 3 -} -``` - -backend.k - -```python -import yaml - -schema Backend: - apiVersion: str = "v1" - kind: str = "Deployment" - metadata: {str:} - spec: {str:} = { - minReadySeconds = 0 - paused = False - progressDeadlineSeconds = 600 - replicas = 1 - revisionHistoryLimit = 10 - selector = {} - } - -_backends = [Backend { - metadata.name = model.name - metadata.labels = model.labels - metadata.annotations = model.annotations - spec.selector.matchLabels: model.labels - spec.replicas = model.replicas -} for model in Model.instances()] # Schema Model is defined in model.k -print("---\n".join([yaml.encode(_b, ignore_private=True) for _b in _backends])) -``` - -命令为: - -``` -kcl model.k backend.k -``` - -输出为: - -```yaml -apiVersion: v1 -kind: Deployment -metadata: - name: model1 - labels: - key1: value1 - key2: value2 - annotations: - key: value -spec: - minReadySeconds: 0 - paused: false - progressDeadlineSeconds: 600 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - key1: value1 - key2: value2 ---- -apiVersion: v1 -kind: Deployment -metadata: - name: model2 -spec: - minReadySeconds: 0 - paused: false - progressDeadlineSeconds: 600 - replicas: 3 - revisionHistoryLimit: 10 - selector: {} -``` - -### KCL 变量查询 - -我们可以在 KCL CLI 使用 `-S|--path-selector` 参数从 KCL 模型中查询一个或多个值。 - -变量查询形式如下: - -`pkg:var.name` - -- 在包中按名称选择节点 `pkg` - -`pkg:var.{name1,name2}` - -- 在包中选择多个节点 `pkg` - -`pkg:var.*` - -- 选择包中给定级别的所有节点 `pkg` - -`pkg:var.[index]` - -- 选择包 `pkg` 中列表 `var` 由 `index` 索引的元素 - -请注意,KCL 变量通过包名和变量标识符 `pkg:identifier` 的组合来确保全局唯一性。 因此,我们需要同时指定 `pkg` 和 `identifier`。 省略参数 `pkg` 时,表示从当前路径的入口文件中查找变量。 - -#### 示例 - -Code structure: - -``` -. -├── kcl.mod -└── main.k - └── pkg - └── model.k -``` - -pkg/model.k: - -```python -schema Person: - name: str - age: int - -var = Person { - name = "Alice" - age = 18 -} -``` - -main.k - -```python -import pkg - -var = pkg.Person { - name = "Bob" - age = 10 -} -``` - -命令为: - -``` -kcl main.k -S pkg:var -S :var.name -``` - -输出结果为: - -```yaml -var: - name: Bob ---- -var: - name: Alice - age: 18 -``` - -### KCL 变量修改 - -除了变量查询,KCL 还允许我们通过 KCL CLI 的 `-O|--overrides` 参数直接修改配置模型中的值。 - -变量修改参数的使用与变量查询类似,参数包含三部分,如 `pkg`、`identifier`、`attribute` 和 `override_value` . 
- -``` -kcl main.k -O override_spec -``` - -- `override_spec`: 表示需要修改的配置模型字段和值的统一表示 - -``` -override_spec: [[pkgpath] ":"] identifier ("=" value | "-") -``` - -- `pkgpath`: 表示需要修改标识符的包路径,通常为 `a.b.c` 的形式,对于 main 包,`pkgpath` 表示为 `__main__`, 可省略,省略不写时表示 main 包 -- `identifier`: 表示需要修改配置的标识符,通常为 `a.b.c` 的形式 -- `value`: 表示需要修改配置的值,可以是任意合法的 KCL 表达式,比如数字/字符串字面值,list/dict/schema 表达式等 -- `=`: 表示修改identifier的值 - - 当 identifier 存在时,修改已有 identifier的值为 value - - 当 identifier 不存在时,添加 identifier属性,并将其值设置为 value -- `-`: 表示删除 identifier属性 - - 当 identifier 存在时,直接进行删除 - - 当 identifier 不存在时,对配置不作任何修改 - -请注意,当 `identifier` 出现多次时,修改/删除全部 `identifier` 的值 - -此外,在 KCL 中还提供了 API 用于变量查询和修改,详见 [API 文档](../xlang-api/go-api.md) - -#### 示例 - -##### 修改示例 - -KCL 代码: - -```python -schema Person: - name: str - age: int - -person = Person { - name = "Alice" - age = 18 -} -``` - -命令为: - -``` -kcl main.k -O :person.name=Bob -O :person.age=10 -``` - -输出结果为: - -```yaml -person: - name: Bob - age: 10 -``` - -此外,当我们使用 KCL CLI `-d` 参数时,KCL 文件将同时修改为以下内容 - -``` -kcl main.k -O :person.name=Bob -O :person.age=10 -d -``` - -```python -schema Person: - name: str - age: int - -person = Person { - name = "Bob" - age = 10 -} -``` - -##### 删除示例 - -KCL 代码: - -```python -schema Config: - x?: int = 1 - y?: str = "s" - -config = Config { - x = 2 -} -``` - -命令为: - -``` -kcl main.k -O config.x- -``` - -输出结果为: - -```yaml -config: - x: 1 - y: s -``` - -### KCL 工具 - -KCL 内部还内置了一些语言外设工具来辅助 KCL 的编写和测试,比如格式化工具、文档生成工具、测试工具、lint 工具、插件工具等,以下是各个工具的文档链接。 - -- [Format](/reference/cli/kcl/fmt.md) -- [Docgen](/reference/cli/kcl/docgen.md) -- [Test](/reference/cli/kcl/test.md) -- [Lint](/reference/cli/kcl/lint.md) -- [Plugin](../plugin/index.md) - -### 总结 - -本页总结了 KCL 语言中的常用功能。 KCL 作为一种新的语言,会根据配置场景的需求,逐步增加功能特性。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/_category_.json deleted file mode 100644 index 71e8053a..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Module System", - "position": 2 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/base64.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/base64.md deleted file mode 100644 index 6a9c5b93..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/base64.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "base64" -linkTitle: "base64" -type: "docs" -description: base64 编码解码 -weight: 100 ---- -## encode - -`encode(value: str, encoding: str = "utf-8") -> str` - -Encode the string `value` using the codec registered for encoding. - -## decode - -`decode(value: str, encoding: str = "utf-8") -> str` - -Decode the string `value` using the codec registered for encoding. 
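A minimal usage sketch for the two functions above (assuming the module is imported by its package name, as with the other system modules):

```python
import base64

encoded = base64.encode("value")      # "dmFsdWU="
decoded = base64.decode("dmFsdWU=")   # "value"
```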
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/builtin.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/builtin.md deleted file mode 100644 index 91b5bb9d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/builtin.md +++ /dev/null @@ -1,396 +0,0 @@ ---- -title: "builtin" -sidebar_position: 1 ---- -KCL 提供了一个内置系统模块的列表,这些模块是自动加载的,无需提供任何模块名称即可直接使用。例如,`print` 就是一个广泛使用的内置模块提供的函数。 - -## 类型转换 - -KCL的 `bool`、`int`、`float`、`str`、`list`、`dict`等类型有内置同名的转换函数。其中 `int` 不仅仅可以用于截断浮点数,也可以用来将字符串转化为整数(解析时为10进制,也可以制定其他值)。 - -下面是类型相关函数常见的用法: - -```py -b1 = bool(1) # true -b2 = bool(1.5) # true -b3 = bool("true") # true -b4 = bool("") # false -b5 = bool([]) # false -b6 = bool({}) # false - -i1 = int("11") # 11 -i2 = int("11", base=8) # 9 -i3 = int("11", base=2) # 3 - -f1 = float(1) # 1.0 -f2 = float("1.5") # 1.5 - -s1 = str(1) # 1 - -l1 = list([1, 2, 3]) -``` - -## print - -`print(*args:any, end:str='\n')` - -内置的打印函数,提供不同类型的可变参数打印,默认在结尾添加一个换行符号。以下上常见的用法: - -```python -print("hello KCL") -print() -print(None, end=':') -print(None) -print(True) -print(False) -print(123) -print(123.0) -print('abc ${123}') -print("abc ${456}") -print([1,'a', True]) -print(1,'a', True) -print({}) -print({a: 123}) -``` - -输出格式如下: - -```shell -hello KCL - -None:None -True -False -123 -123.0 -abc 123 -abc 456 -[1, 'a', True] -1 a True -{} -{'a': 123} -``` - -如果不希望在默认换行时,可以通过 `end=''` 命名参数重新指定结尾的字符串。 - -## multiplyof - -`multiplyof(a:int, b:int) -> bool` - -判断整数 `a` 是否为 `b` 的整数倍,返回布尔值: - -```python -print(multiplyof(2, 1)) # True -print(multiplyof(1, 2)) # False -print(multiplyof(0, 1)) # True -print(multiplyof(0, 2)) # True -print(multiplyof(1, 0)) # Error -``` - -`0` 是任何数的倍数。但是 `b` 不能为 `0`,否则将抛出异常。 - -## isunique - -`isunique(list: [any]) -> bool` - -判断数组中是否存在重复的元素,返回布尔值: - -```python -print(isunique([])) # True -print(isunique([1])) # True -print(isunique([1, 2])) # True - -print(isunique([1, 1])) # False -print(isunique([1, 1.0])) # False -print(isunique([1.1, 1.1])) # False - -print(isunique(['abc', "abc"])) # False -print(isunique(['abc', "a${'bc'}"])) # False -``` - -需要注意的是整数和浮点数会忽略类型差异,根据值是否相等判断。 - -## len - -`len(x: str | [any] | {:}) -> int` - -返回字符串、列表和数组的长度: - -```python -print(len([])) # 0 -print(len({})) # 0 - -print(len([1])) # 1 -print(len({abc:123})) # 1 - -print("abc") # 3 -``` - -注:不支持对 `schema` 对象计算长度。 - -## abs - -`abs(x: number) -> number` - -计算 `x` 的绝对值。 - -## all_true - -`all_true(x:str|[]|{:}) -> bool` - -判断列表或字典类全部元素为真,用法如下: - -```python -print(all_true([])) # True -print(all_true({})) # True - -print(all_true([True])) # True -print(all_true([1])) # True - -print(all_true([True, False])) # False -print(all_true([True, None])) # False -``` - -当列表为空时返回真。 - - - -## any_true - -`any_true(x:str|[]|{:}) -> bool` - -判断可迭代对象中至少有一个元素为真,用法如下: - -```python -print(any_true([])) # False -print(any_true([1])) # True -``` - -## bin - -`bin(x:number) -> str` - -返回整数的二进制表示的字符串,用法如下: - -```python -print(bin(8)) # 0b1000 -``` - -## hex - -`hex(number)` - -返回整数的十六进制表示的字符串,用法如下: - -```python -print(hex(18)) # 0x12 -``` - -## oct - -`oct(number)` - -返回整数的八进制表示的字符串,用法如下: - -```python -print(oct(10)) # 0o12 -``` - -## option - -`option(key:str, type:str='', required=False, default=None, help="") -> any` - -获取命令行参数输入的值。 - -## ord - -`ord(c) -> int` - -获取字符的 Unicode 码点值,用法如下: - -```python -print(ord('A')) # 65 -print(ord('B')) # 66 -print(ord('C')) # 67 -``` - -## sorted - -`sorted(x: []) -> []` - -返回排序后的列表,用法如下: - -```python -_a = [] -_b 
= [2, 1] - -_c = sorted(_a) -_d = sorted(_b) - -print(_a) # [] -print(_b) # [2, 1] -print(_c) # [] -print(_d) # [1, 2] -``` - -## range - -`range(start:int, end:int, step=1) -> [int]` - -产生迭代列表,用法如下: - -```python -print(range(1,5)) # [1, 2, 3, 4] -print(range(1,5, 2)) # [1, 3] -print(range(5, 1, -1)) # [5, 4, 3, 2] -``` - -## min - -`min(x:[number]) -> number` - -返回列表中最小的元素,用法如下: - -```python -print(min([1,2])) # 1 -print(min([2,1])) # 1 -``` - - - -## max - -`max(x:[number]) -> number` - -返回列表中最大的元素,用法如下: - -```python -print(max([1,2])) # 2 -print(max([2,1])) # 2 -``` - -## sum - -`sum(x:[number], init_value=0) -> number` - -返回列表中全部元素的和,用法如下: - -``` -print(sum([1,2])) # 3 -print(sum([2,1], 1000)) # 1003 -``` - -## pow - -`pow(x: number, y: number, z: number = None) -> number` - - 计算 `x**y`,如果 `z` 非空则计算 `(x**y)%z`,支持整数和浮点数。 - -下面的常见的用法: - -```python -print(pow(2,3)) # 8 -print(pow(2, 3, 5)) # 8%5 == 3 - -print(pow(2, 0.5)) # 1.414 -``` - -## round - -`round(number: int|float, ndigits:int|None) -> float | int` - -返回 `number` 的四舍五入近似值。如果 `ndigits` 非 `None` 则返回浮点数并保留指定位数的小数(不能为负数),否则返回整数结构。 - -下面是常用的用法: - -```python -print(round(1)) # 1 -print(round(1.4)) # 1 -print(round(1.5)) # 2 - -print(round(1.5555, 1)) # 1.6 -print(round(1.5555, 2)) # 1.56 - -print(round(1.5555)) # 2 -print(round(1.5555, 0)) # 2.0 -``` - -需要注意的是,`ndigits` 为 `None` 和 `0` 的区别是前缀返回 `int` 类型、后者返回 `float` 类型。 - -## typeof - -`typeof(x: any, full_name: bool = False) -> str` - -输出 `x` 在运算时的类型。当 `full_name` 参数设置为 `True` 时,将返回 `pkg.schema` 形式的包前缀。 - -下面是常见的用法: - -```python -import sub as pkg - -_a = 1 - -t1 = typeof(_a) -t2 = typeof("abc") - -schema Person: - name?: any - -_x1 = Person{} -t3 = typeof(_x1) - -_x2 = pkg.Person{} -t4 = typeof(_x2) -t5 = typeof(_x2, full_name=True) - -t6 = typeof(_x1, full_name=True) - -# 输出 -# t1: int -# t2: str -# t3: Person -# t4: Person -# t5: sub.Person -# t6: __main__.Person -``` - -## zip - -`zip(*args: str|list|dict)` - -用于将可迭代的对象作为参数,将对象中对应的元素打包成一个个元组,然后返回由这些元组组成的列表。 - -下面是常见的用法: - -```py -a = zip([0, 1, 2], [3, 4, 5]) -b = zip([0, 1], [3, 4, 5]) -c = zip([0, 1, 2], [3, 4, 5, 6]) - -# 输出 -# a: -# - - 0 -# - 3 -# - - 1 -# - 4 -# - - 2 -# - 5 -# b: -# - - 0 -# - 3 -# - - 1 -# - 4 -# c: -# - - 0 -# - 3 -# - - 1 -# - 4 -# - - 2 -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/crypto.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/crypto.md deleted file mode 100644 index 0d6fd00a..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/crypto.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "crypto" -linkTitle: "crypto" -type: "docs" -description: crypto 包 - 提供 SHA 相关的哈希函数 -weight: 100 ---- -## md5 - -`md5(value: str, encoding: str = "utf-8") -> str` - -Encrypt the string `value` using `MD5` and the codec registered for encoding. - -## sha1 - -`sha1(value: str, encoding: str = "utf-8") -> str` - -Encrypt the string `value` using `SHA1` and the codec registered for encoding. - -## sha224 - -`sha224(value: str, encoding: str = "utf-8") -> str` - -Encrypt the string `value` using `SHA224` and the codec registered for encoding. - -## sha256 - -`sha256(value: str, encoding: str = "utf-8") -> str` - -Encrypt the string `value` using `SHA256` and the codec registered for encoding. - -## sha384 - -`sha384(value: str, encoding: str = "utf-8") -> str` - -Encrypt the string `value` using `SHA384` and the codec registered for encoding. 
- -## sha512 - -`sha512(value: str, encoding: str = "utf-8") -> str` - -Encrypt the string `value` using `SHA512` and the codec registered for encoding. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/datetime.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/datetime.md deleted file mode 100644 index 53473664..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/datetime.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "datetime" -linkTitle: "datetime" -type: "docs" -description: datetime 包 - 时间处理 -weight: 100 ---- -- datetime - - ticks() -> float - Return the current time in seconds since the Epoch. Fractions of a second may be present if the system clock provides them. - - date() -> str - Return the `%Y-%m-%d %H:%M:%S` format date. - - now() -> str - Return the local time. e.g. `'Sat Jun 06 16:26:11 1998'` - - today() -> str - Return the `%Y-%m-%d %H:%M:%S.%{ticks}` format date. - -## time - -`ticks() -> float` - -Return the current time in seconds since the Epoch. Fractions of a second may be present if the system clock provides them. - -## date - -`date() -> str` - -Return the `%Y-%m-%d %H:%M:%S` format date. - -## now - -`now() -> str` - -Return the local time. e.g. `'Sat Jun 06 16:26:11 1998'` - -## today - -`today() -> str` - -Return the `%Y-%m-%d %H:%M:%S.%{ticks}` format date. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/index.md deleted file mode 100644 index 8f1d50c4..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Module System - -KCL provides engineering extensibility through system modules, user modules and plug-in modules. This section describes the basic concepts of system modules. Plugin modules refer to [plugin system](/docs/reference/lang/plugin). diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/json.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/json.md deleted file mode 100644 index 2954f2e7..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/json.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: "json" -linkTitle: "json" -type: "docs" -description: JSON 编码解码 -weight: 100 ---- -## encode - -``` -encode( - data: any, - sort_keys: bool = False, - indent: int = None, - ignore_private: bool = False, - ignore_none: bool = False -) -> str -``` - -Serialize a KCL object `data` to a JSON formatted str. - -## decode - -`decode(value: str) -> any` - -Deserialize `value` (a string instance containing a JSON document) to a KCL object. - -## dump_to_file - -``` -dump_to_file( - data: any, - filename: str, - ignore_private: bool = False, - ignore_none: bool = False -) -> None -``` - -Serialize a KCL object `data` to a JSON formatted str and write it into the file `filename`. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/math.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/math.md deleted file mode 100644 index 4fa70b94..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/math.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "math" -linkTitle: "math" -type: "docs" -description: math 包 - 数学函数 -weight: 100 ---- -## ceil - -`ceil(x) -> int` - -Return the ceiling of x as an Integral. 
This is the smallest integer >= x. - -## factorial - -`factorial(x) -> int` - -Return x!. Raise a error if x is negative or non-integral. - -## floor - -`floor(x) -> int` - -Return the floor of x as an Integral. This is the largest integer <= x. - -## gcd - -`gcd(a: int, b: int) -> int` - -Return the greatest common divisor of x and y - -## isfinite - -`isfinite(x) -> bool` - -Return True if x is neither an infinity nor a NaN, and False otherwise. - -## isinf - -`isinf(x) -> bool` - -Return True if x is a positive or negative infinity, and False otherwise. - -## isnan - -`isnan(x) -> bool` - -Return True if x is a NaN (not a number), and False otherwise. - -## modf - -`modf(x) -> Listfloat, float]` - -Return the fractional and integer parts of x. Both results carry the sign of x and are floats. - -## exp - -`exp(x) -> float` - -Return e raised to the power of x. - -## expm1 - -`expm1(x) -> float` - -Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x. - -## log - -`log(x) -> float` - -Return the logarithm of x to the base e. - -## log1p - -`log1p(x) -> float` - -Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero. - -## log2 - -`log2(x) -> float` -Return the base 2 logarithm of x. - -## log10 - -`log10(x) -> float` - -Return the base 10 logarithm of x. - -## pow - -`pow(x, y) -> float` - -Return x**y (x to the power of y). - -## sqrt - -`sqrt(x) -> float` - -Return the square root of x. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/net.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/net.md deleted file mode 100644 index 361068b3..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/net.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "net" -linkTitle: "net" -type: "docs" -description: net 包 - 网络IP处理 -weight: 100 ---- -## split_host_port - -`split_host_port(ip_end_point: str) -> List[str]` - -Split the 'host' and 'port' from the ip end point. - -## join_host_port - -`join_host_port(host, port) -> str` - -Merge the 'host' and 'port'. - -## fqdn - -`fqdn(name: str = '') -> str` - -Return Fully Qualified Domain Name (FQDN). - -## parse_IP - -`parse_IP(ip) -> str` - -Parse 'ip' to a real IP address - -## to_IP4 - -`to_IP4(ip) -> str` - -Get the IP4 form of 'ip'. - -## to_IP16 - -`to_IP16(ip) -> int` - -Get the IP16 form of 'ip'. - -## IP_string - -`IP_string(ip: str | int) -> str` - -Get the IP string. - -## is_IPv4 - -`is_IPv4(ip: str) -> bool` - -Whether 'ip' is a IPv4 one. - -## is_IP - -`is_IP(ip: str) -> bool` - -Whether ip is a valid ip address. - -## is_loopback_IP - -`is_loopback_IP(ip: str) -> bool` - -Whether 'ip' is a loopback one. - -## is_multicast_IP - -`is_multicast_IP(ip: str) -> bool` - -Whether 'ip' is a multicast one. - -## is_interface_local_multicast_IP - -`is_interface_local_multicast_IP(ip: str) -> bool` - -Whether 'ip' is a interface, local and multicast one. - -## is_link_local_multicast_IP - -`is_link_local_multicast_IP(ip: str) -> bool` - -Whether 'ip' is a link local and multicast one. - -## is_link_local_unicast_IP - -`is_link_local_unicast_IP(ip: str) -> bool` - -Whether 'ip' is a link local and unicast one. - -## is_global_unicast_IP - -`is_global_unicast_IP(ip: str) -> bool` - -Whether 'ip' is a global and unicast one. - -## is_unspecified_IP - -`is_unspecified_IP(ip: str) -> bool` - -Whether 'ip' is a unspecified one. 
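A short usage sketch for a few of the functions above (signatures as documented; the results in the comments are the expected values):

```python
import net

host_and_port = net.split_host_port("localhost:8080")   # ["localhost", "8080"]
endpoint = net.join_host_port("localhost", "8080")      # "localhost:8080"
ipv4 = net.is_IPv4("10.0.0.1")                           # True
loopback = net.is_loopback_IP("127.0.0.1")               # True
```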
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/overview.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/overview.md deleted file mode 100644 index bc126880..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/overview.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -sidebar_position: 0 ---- - -# Overview - -KCL 是面向配置的编程语言,通过内置模块、KCL 模块和插件模块提供工程化的扩展能力。 - -![](/img/docs/reference/lang/model/kcl-module.png) - -用户代码中不用导入直接使用 builtin 的函数(比如用 len 计算列表的长度、通过 typeof 获取值的类型等),而对于字符串等基础类型也提供了一些内置方法(比如转化字符串的大小写等方法)。对于相对复杂的通用工作则通过标准库提供,比如通过 import 导入 math 库就可以使用相关的数学函数,可以通过导入 regex 库使用正则表达式库。而针对 KCL 代码也可以组织为模块,比如 Konfig 大库中将基础设施和各种标准的应用抽象为模块供上层用户使用。此外还可以通过 Plugin 机制,采用 Python 为 KCL 开发插件,比如目前有 meta 插件可以通过网络查询中心配置信息,app-context 插件则可以用于获取当前应用的上下文信息从而简化代码的编写。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/regex.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/regex.md deleted file mode 100644 index 884b4f44..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/regex.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "regex" -linkTitle: "regex" -type: "docs" -description: regex 包 - 正则表达式 -weight: 100 ---- -## replace - -`replace(string: str, pattern: str, replace: str, count=0) -> str` - -Return the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in string by the replacement. - -## match - -`match(string: str, pattern: str) -> bool` - -Try to apply the pattern at the start of the string, returning a bool value True if any match was found, or False if no match was found. - -## compile - -`compile(pattern: str) -> bool` - -Compile a regular expression pattern, returning a bool value denoting whether the pattern is valid. - -## findall - -`findall(string: str, pattern: str) -> List[str]` - -Return a list of all non-overlapping matches in the string. - -## search - -`search(string: str, pattern: str) -> bool` - -Scan through string looking for a match to the pattern, returning a bool value True if any match was found, or False if no match was found. - -## split - -`split(string: str, pattern: str, maxsplit=0) -> List[str]` - -Scan through string looking for a match to the pattern, returning a Match object, or None if no match was found. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/units.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/units.md deleted file mode 100644 index 4fe257ae..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/units.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "units" -linkTitle: "units" -type: "docs" -description: units 包 - 单位处理 -weight: 100 ---- -## 单位的常量 - -- 定点数: `n`, `u`, `m`, `k`, `K`, `G`, `T` and `P`. -- 2 的幂: `Ki`, `Mi`, `Gi`, `Ti` and `Pi`. 
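These suffixes can also appear directly as number literals in KCL code; a small illustrative sketch (the serialized form may differ between KCL versions):

```python
memory = 2Gi    # 2 * 1024 ** 3 = 2147483648
disk = 10G      # 10 * 1000 ** 3 = 10000000000
```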
- -## 函数列表 - -- `to_n(num: int) -> str` - Int literal to string with `n` suffix -- `to_u(num: int) -> str` - Int literal to string with `u` suffix -- `to_m(num: int) -> str` - Int literal to string with `m` suffix -- `to_K(num: int) -> str` - Int literal to string with `K` suffix -- `to_M(num: int) -> str` - Int literal to string with `M` suffix -- `to_G(num: int) -> str` - Int literal to string with `G` suffix -- `to_T(num: int) -> str` - Int literal to string with `T` suffix -- `to_P(num: int) -> str` - Int literal to string with `P` suffix -- `to_Ki(num: int) -> str` - Int literal to string with `Ki` suffix -- `to_Mi(num: int) -> str` - Int literal to string with `Mi` suffix -- `to_Gi(num: int) -> str` - Int literal to string with `Gi` suffix -- `to_Ti(num: int) -> str` - Int literal to string with `Ti` suffix -- `to_Pi(num: int) -> str` - Int literal to string with `Pi` suffix diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/yaml.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/yaml.md deleted file mode 100644 index e2e1c601..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/model/yaml.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "yaml" -linkTitle: "yaml" -type: "docs" -description: yaml 编码解码 -weight: 300 ---- -## encode - -``` -encode( - data: any, - sort_keys: bool = False, - ignore_private: bool = False, - ignore_none: bool = False -) -> str -``` - -Serialize a KCL object `data` to a YAML formatted str. - -## decode - -`decode(value: str) -> any` - -Deserialize `value` (a string instance containing a YAML document) to a KCL object. - -## dump_to_file - -``` -dump_to_file( - data: any, - filename: str, - ignore_private: bool = False, - ignore_none: bool = False -) -> None -``` - -Serialize a KCL object `data` to a YAML formatted str and write it into the file `filename`. diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/_category_.json deleted file mode 100644 index e77dfb1c..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Plugin System", - "position": 4 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/index.md deleted file mode 100644 index df299e93..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/index.md +++ /dev/null @@ -1 +0,0 @@ -# Plugin System diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/overview.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/overview.md deleted file mode 100644 index 3030bdd2..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/overview.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Introduction - -KCL 是声明式配置策略语言,对于不方便通过配置直接描述的复杂的业务逻辑可以通过通用的编程语言开发 KCL 插件对语言进行扩展。KCL 支持通过通用语言开发插件,KCL 程序导入插件中的函数。KCL 通过插件运行时和辅助的命令行工具提供插件支持。KCL 插件框架支持多种不同的通用语言开发插件,这里我们以 Python 为例简单说明插件的使用。 - -插件的 Git 仓库: [https://github.com/KusionStack/kcl-plugin](https://github.com/KusionStack/kcl-plugin) - -## 1. 
你好插件 - -KCL 插件在 KCLVM 的 `plugins` 子目录(通常安装在 `$HOME/.kusion/kclvm/plugins` 目录),或者通过 `$KCL_PLUGINS_ROOT` 环境变量设置(环境变量优先级更高)。对于插件开发人员,插件都在 Git 仓库管理: [https://github.com/KusionStack/kcl-plugin](https://github.com/KusionStack/kcl-plugin) ,可以将插件仓库克隆到该目录进行开发。 - -输入 `kcl-plugin info` 命令查看查看插件目录(将其中的 `/Users/kcl_user` 替换成本地的 `$HOME` 路径): - -```shell -$ kcl-plugin info -# plugin_root: /Users/kcl_user/.kusion/kclvm/plugins -``` - -通过 `kcl-plugin list` 子命令查看插件列表: - -```shell -$ kcl-plugin list -hello: hello doc - 0.0.1 -``` - -其中 `hello` 是 KCL 内置的示例插件(不要修改改插件)。 - -在 KCL 代码中,可以通过 `kcl_plugin.hello` 导入 `hello` 插件。`main.k` 代码如下: - -```python -import kcl_plugin.hello - -name = "kcl" -three = hello.add(1,2) -``` - -输出结果如下: - -```shell -$ kcl main.k -name: kcl -three: 3 -``` - -## 2. `kcl-plugin` 辅助命令 - -`kcl-plugin` 是提供的插件辅助工具,命令行帮助如下: - -```shell -$ kcl-plugin -usage: kcl-plugin [-h] {list,info,init,gendoc,test} ... -positional arguments: - {list,info,init,gendoc,test} - kcl plugin sub commands - list list all plugins - info show plugin document - init init a new plugin - gendoc gen all plugins document - test test plugin -optional arguments: - -h, --help show this help message and exit -``` - -其中 `list` 子命令用于查看插件列表;`info` 用户查看插件目录和每个插件的信息;`init` 可以用户初始化新插件;`gendoc` 更新全部插件的 API 文档;`test` 测试指定的插件。 - -## 3. 插件信息和文档 - -输入 `kcl-plugin info hello` 查看 `hello` 插件信息: - -```shell -$ kcl-plugin info hello -{ - "name": "hello", - "describe": "hello doc", - "long_describe": "long describe", - "version": "0.0.1", - "method": { - "add": "add two numbers, and return result", - "foo": "no doc", - "list_append": "no doc", - "say_hello": "no doc", - "tolower": "no doc", - "update_dict": "no doc" - } -} -``` - -插件的信息主要包含插件的名字和版本信息,插件提供的函数信息。该信息和插件目录中自动生成的 `api.md` 文件是一致的(插件 API 变化时通过 `kcl-plugin gendoc` 为全部的插件重新生成 `api.md` 文件)。 - -## 4. 插件的目录结构 - -插件的目录结构如下(将其中的 `/Users/kcl_user` 替换成本地的 `$HOME` 路径): - -```shell -$ tree /Users/kcl_user/.kusion/kclvm/plugins/ -/Users/kcl_user/.kusion/kclvm/plugins/ -├── _examples -├── _test -└── hello - ├── api.md - ├── plugin.py - └── plugin_test.py -$ -``` - -其中 `_examples` 目录下是插件的示例代码,`_test` 目录下是插件的 KCL 测试代码,其他以字母开头的目录是普通的插件(目录中同时包含 `plugin.py` 和 `plugin_test.py` 文件)。 - -KCL 的插件是有一个独立的纯 Python 代码文件实现,并且插件相互之间不能直接调用。插件的内容如下: - -```shell -$ cat ./hello/plugin.py -# Copyright 2020 The KCL Authors. All rights reserved. -INFO = { - 'name': 'hello', - 'describe': 'hello doc', - 'long_describe': 'long describe', - 'version': '0.0.1', -} -def add(a: int, b: int) -> int: - """add two numbers, and return result""" - return a + b -... -``` - -其中 `INFO` 指明了插件的名字、概要说明、详细说明和版本信息。而所有名字以字母开头的函数是插件给 KCL 提供的函数,因此 KCL 中可以直接调用 `add` 函数。 - -## 5. 创建一个插件 - -通过 `kcl-plugin init` 命令可以创建一个插件示例: - -``` -$ kcl-plugin init hi -$ kcl-plugin list -hello: hello doc - 0.0.1 -hi: hi doc - 0.0.1 -``` - -`kcl-plugin init` 命令会以内置的模板构造一个新的插件,然后通过 `kcl-plugin list` 命令可以查看到新创建的插件。 - -## 6. 插件的删除 - -KCL 插件在 KCLVM 的 `plugins` 子目录(通常安装在 `$HOME/.kusion/kclvm/plugins` 目录)。 -可以通过命令 `kcl-plugin info` 查询插件安装目录。 - -```shell -$ kcl-plugin info -/Users/kcl_user/.kusion/kclvm/plugins/ -$ tree /Users/kcl_user/.kusion/kclvm/plugins/ -/Users/kcl_user/.kusion/kclvm/plugins/ -├── _examples -├── _test -└── hello -- 删除这个目录就可以删除 hello plugin - ├── api.md - ├── plugin.py - └── plugin_test.py -$ -``` - -## 7. 
插件的测试 - -插件是独立的纯 Python 文件实现,插件目录下有个 `plugin_test.py` 文件是插件的单元测试文件(基于 pytest 测试框架)。此外在 `_test` 目录下放置的是 KCL 文件的插件集成测试。`plugin_test.py` 单元测试是必须的,`_test` 目录下的 KCL 集成测试可以根据情况添加。 - -可以通过 `kcl-plugin test` 执行插件的单元测试: - -```shell -$ kcl-plugin test hello -============================= test session starts ============================== -platform darwin -- Python 3.7.6+, pytest-5.3.5, py-1.9.0, pluggy-0.13.1 -rootdir: /Users/kcl_user -collected 5 items -.kusion/kclvm/plugins/hello/plugin_test.py ..... [100%] -============================== 5 passed in 0.03s =============================== -$ -``` - -集成测试可以通过在 `_test` 目录下执行 `python3 -m pytest` 命令进行测试。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/project_context.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/project_context.md deleted file mode 100644 index 6171f883..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/plugin/project_context.md +++ /dev/null @@ -1,59 +0,0 @@ -# project_context - -project_context extract base info from project.yaml&stack.yaml - -*version: 0.0.1* - -## `get_project_current_path` - -return the relative path of first file - -Example: - -```py -import kcl_plugin.project_context as ctx - -path = ctx.get_project_current_path() -print(path) -``` - -## `get_project_input_file` - -return compiling file list - -Example: - -```py -import kcl_plugin.project_context as ctx - -input_file = ctx.get_project_input_file() -print(input_file) -``` - -## `get_project_context` - -return the current project context from project.yaml - -Example: - -```py -import kcl_plugin.project_context as ctx - -project = ctx.get_project_context() -# Get project name -print(project?.name) -``` - -## `get_stack_context` - -return the current stack context from stack.yaml - -Example: - -```py -import kcl_plugin.project_context as ctx - -stack = ctx.get_stack_context() -# Get stack name -print(stack?.name) -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/_category_.json deleted file mode 100644 index c3c2aee8..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Use case", - "position": 5 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/index.md deleted file mode 100644 index 1b2042db..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/index.md +++ /dev/null @@ -1 +0,0 @@ -# Use Case diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/validation.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/validation.md deleted file mode 100644 index 153e6b4d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/use_case/validation.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -sidebar_position: 1 ---- -# KCL Validation - -除了使用 KCL 代码生成 JSON/YAML 等配置格式,KCL 还支持对 JSON/YAML 数据进行格式校验。作为一种配置语言,KCL 在验证方面几乎涵盖了 OpenAPI 的所有功能。在 KCL 中可以通过一个结构定义来约束配置数据,同时支持通过 check 块自定义约束规则,在 schema 中书写校验表达式对 schema 定义的属性进行校验和约束。通过 check 表达式可以非常清晰简单地校验输入的 JSON/YAML 是否满足相应的 schema 结构定义与 check 约束。 - -## 简介 - -在 schema 定义当中可以使用 check 关键字编写 schema 属性的校验规则, 如下所示,check 代码块中的每一行都对应一个条件表达式,当满足条件时校验成功,当不满足条件时校验失败, 条件表达式后可跟 `, "check error 
message"` 表示当校验失败时需要显示的信息。 - -```python -import regex - -schema Sample: - foo: str # Required, 不能为None/Undefined, 且类型必须为str - bar: int # Required, 不能为None/Undefined, 且类型必须为int - fooList: [int] # Required, 不能为None/Undefined, 且类型必须为int列表 - color: "Red" | "Yellow" | "Blue" # Required, 字面值联合类型,且必须为"Red", "Yellow", "Blue"中的一个,枚举作用 - id?: int # Optional,可以留空,类型必须为int - - check: - 0 <= bar < 100 # bar必须大于等于0,小于100 - 0 < len(fooList) < 100 # fooList不能为None/Undefined,并且长度必须大于0,小于100 - regex.match(foo, "^The.*Foo$") # regex 正则表达式匹配 - bar in range(100) # range, bar范围只能为1到99 - bar in [2, 4, 6, 8] # enum, bar只能取2, 4, 6, 8 - bar % 2 == 0 # bar必须为2的倍数 - all foo in fooList { - foo > 1 - } # fooList中的所有元素必须大于1 - any foo in fooList { - foo > 10 - } # fooList中至少有一个元素必须大于10 - abs(id) > 10 if id # check if 表达式,当 id 不为空时,id的绝对值必须大于10 -``` - -综上所述,KCL Schema 中支持的校验类型为: - -| 校验类型 | 使用方法 | -| -------- | ------------------------------------------------------- | -| 范围校验 | 使用 `<`, `>` 等比较运算符 | -| 正则校验 | 使用 `regex` 系统库中的 `match` 等方法 | -| 长度校验 | 使用 `len` 内置函数,可以求 `list/dict/str` 类型的变量长度 | -| 枚举校验 | 使用字面值联合类型 | -| 非空校验 | 使用 schema 的可选/必选属性 | -| 条件校验 | 使用 check if 条件表达式 | - -基于此,KCL 提供了相应的[校验工具](/reference/cli/kcl/vet.md)和 [ValidateCode API](../xlang-api/rest-api.md#3-kclvmservice-%E6%9C%8D%E5%8A%A1) 直接对 JSON/YAML 数据进行校验。此外,通过 KCL schema 的 check 表达式可以非常清晰简单地校验输入的 JSON 是否满足相应的 schema 结构定义与 check 约束。 - -## 未来计划 - -KCL 校验能力的提升将逐渐围绕“静态化”方面展开工作,即在编译时结合形式化验证的能力直接分析出数据是否满足约束条件,约束条件是否冲突等问题,并且可以通过 IDE 实时透出约束错误,而无需在运行时发现错误。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/_c-api.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/_c-api.md deleted file mode 100644 index 454c1ce4..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/_c-api.md +++ /dev/null @@ -1,4 +0,0 @@ -# C API - -- 运行时 C API -- 编译 C API diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/_category_.json deleted file mode 100644 index c2aafb21..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Multi-Language", - "position": 4 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/go-api.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/go-api.md deleted file mode 100644 index bd0a61e1..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/go-api.md +++ /dev/null @@ -1,568 +0,0 @@ - - -# kclvm - -```go -import "github.com/KusionStack/kclvm-go" -``` - -### KCLVM binding for Go - -``` -┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ -│ kcl files │ │ KCLVM-Go-API │ │ KCLResultList │ -│ ┌───────────┐ │ │ │ │ │ -│ │ 1.k │ │ │ │ │ │ -│ └───────────┘ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ ┌───────────┐ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.Get("a.b.c") │ -│ │ 2.k │ │ │ │ Run(path) │ │ │ └───────────┘ │ └───────────────┘ -│ └───────────┘ │────┐ │ └───────────┘ │ │ │ -│ ┌───────────┐ │ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ │ 3.k │ │ │ │ │ │ │ KCLResult │──┼────────▶│x.Get("k", &v) │ -│ └───────────┘ │ │ │ │ │ └───────────┘ │ └───────────────┘ -│ ┌───────────┐ │ ├───▶│ ┌───────────┐ │──────────▶│ │ -│ │setting.yml│ │ │ │ │RunFiles() │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ └───────────┘ │ │ │ └───────────┘ 
│ │ │ KCLResult │──┼────────▶│x.JSONString() │ -└─────────────────┘ │ │ │ │ └───────────┘ │ └───────────────┘ - │ │ │ │ │ -┌─────────────────┐ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ Options │ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.YAMLString() │ -│WithOptions │ │ │ │MustRun() │ │ │ └───────────┘ │ └───────────────┘ -│WithOverrides │────┘ │ └───────────┘ │ │ │ -│WithWorkDir │ │ │ │ │ -│WithDisableNone │ │ │ │ │ -└─────────────────┘ └─────────────────┘ └─────────────────┘ -``` - -
Example -

- -```go -{ - const k_code = ` -import kcl_plugin.hello - -name = "kcl" -age = 1 - -two = hello.add(1, 1) - -schema Person: - name: str = "kcl" - age: int = 1 - -x0 = Person {} -x1 = Person { - age = 101 -} -` - - yaml := kclvm.MustRun("testdata/main.k", kclvm.WithCode(k_code)).First().YAMLString() - fmt.Println(yaml) - - fmt.Println("----") - - result := kclvm.MustRun("./testdata/main.k").First() - fmt.Println(result.JSONString()) - - fmt.Println("----") - fmt.Println("x0.name:", result.Get("x0.name")) - fmt.Println("x1.age:", result.Get("x1.age")) - - fmt.Println("----") - - var person struct { - Name string - Age int - } - fmt.Printf("person: %+v\n", result.Get("x1", &person)) -} -``` - -

-
- -## Index - -- [func FormatCode(code interface{}) ([]byte, error)](<#func-formatcode>) -- [func FormatPath(path string) (changedPaths []string, err error)](<#func-formatpath>) -- [func InitKclvmRuntime(n int)](<#func-initkclvmruntime>) -- [func LintPath(path string) (results []string, err error)](<#func-lintpath>) -- [func OverrideFile(file string, specs []string) (bool, error)](<#func-overridefile>) -- [func RunPlayground(address string) error](<#func-runplayground>) -- [func ValidateCode(data, code string, opt *ValidateOptions) (ok bool, err error)](<#func-validatecode>) -- [type KCLResult](<#type-kclresult>) - - [func EvalCode(code string) (*KCLResult, error)](<#func-evalcode>) -- [type KCLResultList](<#type-kclresultlist>) - - [func MustRun(path string, opts ...Option) *KCLResultList](<#func-mustrun>) - - [func Run(path string, opts ...Option) (*KCLResultList, error)](<#func-run>) - - [func RunFiles(paths []string, opts ...Option) (*KCLResultList, error)](<#func-runfiles>) -- [type KclType](<#type-kcltype>) - - [func GetSchemaType(file, code, schemaName string) ([]*KclType, error)](<#func-getschematype>) -- [type Option](<#type-option>) - - [func WithCode(codes ...string) Option](<#func-withcode>) - - [func WithDisableNone(disableNone bool) Option](<#func-withdisablenone>) - - [func WithKFilenames(filenames ...string) Option](<#func-withkfilenames>) - - [func WithOptions(key_value_list ...string) Option](<#func-withoptions>) - - [func WithOverrides(override_list ...string) Option](<#func-withoverrides>) - - [func WithPrintOverridesAST(printOverridesAST bool) Option](<#func-withprintoverridesast>) - - [func WithSettings(filename string) Option](<#func-withsettings>) - - [func WithWorkDir(workDir string) Option](<#func-withworkdir>) -- [type ValidateOptions](<#type-validateoptions>) - - -## func [FormatCode]() - -```go -func FormatCode(code interface{}) ([]byte, error) -``` - -FormatCode returns the formatted code\. - -
Example -

- -```go -{ - out, err := kclvm.FormatCode(`a = 1+2`) - if err != nil { - log.Fatal(err) - } - fmt.Println(string(out)) - -} -``` - -#### Output - -``` -a = 1 + 2 -``` - -

-
- -## func [FormatPath]() - -```go -func FormatPath(path string) (changedPaths []string, err error) -``` - -FormatPath formats files from the given path path: if path is \`\.\` or empty string\, all KCL files in current directory will be formatted\, not recursively if path is \`path/file\.k\`\, the specified KCL file will be formatted if path is \`path/to/dir\`\, all KCL files in the specified dir will be formatted\, not recursively if path is \`path/to/dir/\.\.\.\`\, all KCL files in the specified dir will be formatted recursively - -the returned changedPaths are the changed file paths \(relative path\) - -
Example -

- -```go -{ - changedPaths, err := kclvm.FormatPath("testdata/fmt") - if err != nil { - log.Fatal(err) - } - fmt.Println(changedPaths) -} -``` - -

-
- -## func [InitKclvmRuntime]() - -```go -func InitKclvmRuntime(n int) -``` - -InitKclvmRuntime init kclvm process\. - -## func [LintPath]() - -```go -func LintPath(path string) (results []string, err error) -``` - -LintPath lint files from the given path - -
Example -

- -```go -{ - - results, err := kclvm.LintPath("testdata/lint/import.k") - if err != nil { - log.Fatal(err) - } - for _, s := range results { - fmt.Println(s) - } - -} -``` - -#### Output - -``` -Unable to import abc. -a is reimported multiple times. -a imported but unused. -``` - -

-
- -## func [OverrideFile]() - -```go -func OverrideFile(file string, specs []string) (bool, error) -``` - -OverrideFile rewrites a file with override spec file: string\. The File that need to be overridden specs: \[\]string\. List of specs that need to be overridden\. Each spec string satisfies the form: \:\=\ or \:\\- When the pkgpath is '\_\_main\_\_'\, it can be omitted\. - -## func [RunPlayground]() - -```go -func RunPlayground(address string) error -``` - -RunPlayground start KCL playground on given address\. - -
Example -

- -```go -{ - addr := "localhost:2022" - fmt.Printf("listen at http://%s\n", addr) - - kclvm.RunPlayground(addr) -} -``` - -

-
- -## func [ValidateCode]() - -```go -func ValidateCode(data, code string, opt *ValidateOptions) (ok bool, err error) -``` - -ValidateCode validate data match code - -## type [KCLResult]() - -```go -type KCLResult = kcl.KCLResult -``` - -
Example -

- -```go -{ - const k_code = ` -import kcl_plugin.hello - -name = "kcl" -age = 1 - -two = hello.add(1, 1) - -schema Person: - name: str = "kcl" - age: int = 1 - -x0 = Person {name = "kcl-go"} -x1 = Person {age = 101} -` - - result := kclvm.MustRun("testdata/main.k", kclvm.WithCode(k_code)).First() - - fmt.Println("x0.name:", result.Get("x0.name")) - fmt.Println("x1.age:", result.Get("x1.age")) - -} -``` - -#### Output - -``` -x0.name: kcl-go -x1.age: 101 -``` - -

-
- -
Example (Get_struct) -

- -```go -{ - const k_code = ` -schema Person: - name: str = "kcl" - age: int = 1 - X: int = 2 - -x = { - "a": Person {age = 101} - "b": 123 -} -` - - result := kclvm.MustRun("testdata/main.k", kclvm.WithCode(k_code)).First() - - var person struct { - Name string - Age int - } - fmt.Printf("person: %+v\n", result.Get("x.a", &person)) - fmt.Printf("person: %+v\n", person) - -} -``` - -#### Output - -``` -person: &{Name:kcl Age:101} -person: {Name:kcl Age:101} -``` - -

-
- -### func [EvalCode]() - -```go -func EvalCode(code string) (*KCLResult, error) -``` - -## type [KCLResultList]() - -```go -type KCLResultList = kcl.KCLResultList -``` - -### func [MustRun]() - -```go -func MustRun(path string, opts ...Option) *KCLResultList -``` - -MustRun is like Run but panics if return any error\. - -
Example -

- -```go -{ - yaml := kclvm.MustRun("testdata/main.k", kclvm.WithCode(`name = "kcl"`)).First().YAMLString() - fmt.Println(yaml) - -} -``` - -#### Output - -``` -name: kcl -``` - -

-
- -
Example (Settings) -

- -```go -{ - yaml := kclvm.MustRun("./testdata/app0/kcl.yaml").First().YAMLString() - fmt.Println(yaml) -} -``` - -

-
- -### func [Run]() - -```go -func Run(path string, opts ...Option) (*KCLResultList, error) -``` - -Run evaluates the KCL program with path and opts\, then returns the object list\. - -
Example (Get Field) -

- -```go -{ - - x, err := kclvm.Run("./testdata/app0/kcl.yaml") - assert(err == nil, err) - - fmt.Println(x.First().Get("deploy_topology.1.zone")) - -} -``` - -#### Output - -``` -RZ24A -``` - -

-
- -### func [RunFiles]() - -```go -func RunFiles(paths []string, opts ...Option) (*KCLResultList, error) -``` - -RunFiles evaluates the KCL program with multi file path and opts\, then returns the object list\. - -
Example -

- -```go -{ - result, _ := kclvm.RunFiles([]string{"./testdata/app0/kcl.yaml"}) - fmt.Println(result.First().YAMLString()) -} -``` - -

-
- -## type [KclType]() - -```go -type KclType = kcl.KclType -``` - -### func [GetSchemaType]() - -```go -func GetSchemaType(file, code, schemaName string) ([]*KclType, error) -``` - -GetSchemaType returns schema types from a kcl file or code\. - -file: string The kcl filename code: string The kcl code string schema\_name: string The schema name got\, when the schema name is empty\, all schemas are returned\. - -## type [Option]() - -```go -type Option = kcl.Option -``` - -### func [WithCode]() - -```go -func WithCode(codes ...string) Option -``` - -WithCode returns a Option which hold a kcl source code list\. - -### func [WithDisableNone]() - -```go -func WithDisableNone(disableNone bool) Option -``` - -WithDisableNone returns a Option which hold a disable none switch\. - -### func [WithKFilenames]() - -```go -func WithKFilenames(filenames ...string) Option -``` - -WithKFilenames returns a Option which hold a filenames list\. - -### func [WithOptions]() - -```go -func WithOptions(key_value_list ...string) Option -``` - -WithOptions returns a Option which hold a key=value pair list for option function\. - -
Example -

- -```go -{ - const code = ` -name = option("name") -age = option("age") -` - x, err := kclvm.Run("hello.k", kclvm.WithCode(code), - kclvm.WithOptions("name=kcl", "age=1"), - ) - if err != nil { - log.Fatal(err) - } - - fmt.Println(x.First().YAMLString()) - -} -``` - -#### Output - -``` -age: 1 -name: kcl -``` - -

-
- -### func [WithOverrides]() - -```go -func WithOverrides(override_list ...string) Option -``` - -WithOverrides returns a Option which hold a override list\. - -### func [WithPrintOverridesAST]() - -```go -func WithPrintOverridesAST(printOverridesAST bool) Option -``` - -WithPrintOverridesAST returns a Option which hold a printOverridesAST switch\. - -### func [WithSettings]() - -```go -func WithSettings(filename string) Option -``` - -WithSettings returns a Option which hold a settings file\. - -### func [WithWorkDir]() - -```go -func WithWorkDir(workDir string) Option -``` - -WithWorkDir returns a Option which hold a work dir\. - -## type [ValidateOptions]() - -```go -type ValidateOptions = validate.ValidateOptions -``` - - - -Generated by [gomarkdoc]() diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/index.md deleted file mode 100644 index f6f8e076..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/index.md +++ /dev/null @@ -1 +0,0 @@ -# Multi-Language diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/overview.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/overview.md deleted file mode 100644 index 1081c34f..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/overview.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Introduction - -KCL 语言提供 C/Rust/Go/Python/Java 等通用编程语言接口,相关语言正在开发完整中。 - -## 1. C/Rust 语言 - -KCL 核心采用 Rust 语言开发,对外导出 C 语言 API 供 Go/Python/Java 等高级语言包装和集成。 - -## 2. Go 语言 - -Go 语言是通过 CGO 包装 KCL 提供的 C-API,同时提供更深度的定制特性以满足 KusionCtl 等上层工具的需求。 - -### 2.1. API 抽象模型 - -Go 语言 API 的抽象模型如下图: - -``` -┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ -│ kcl files │ │ KCLVM-Go-API │ │ KCLResultList │ -│ ┌───────────┐ │ │ │ │ │ -│ │ 1.k │ │ │ │ │ │ -│ └───────────┘ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ ┌───────────┐ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.Get("a.b.c") │ -│ │ 2.k │ │ │ │ Run(path) │ │ │ └───────────┘ │ └───────────────┘ -│ └───────────┘ │────┐ │ └───────────┘ │ │ │ -│ ┌───────────┐ │ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ │ 3.k │ │ │ │ │ │ │ KCLResult │──┼────────▶│x.Get("k", &v) │ -│ └───────────┘ │ │ │ │ │ └───────────┘ │ └───────────────┘ -│ ┌───────────┐ │ ├───▶│ ┌───────────┐ │──────────▶│ │ -│ │setting.yml│ │ │ │ │RunFiles() │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ └───────────┘ │ │ │ └───────────┘ │ │ │ KCLResult │──┼────────▶│x.JSONString() │ -└─────────────────┘ │ │ │ │ └───────────┘ │ └───────────────┘ - │ │ │ │ │ -┌─────────────────┐ │ │ │ │ ┌───────────┐ │ ┌───────────────┐ -│ Options │ │ │ ┌───────────┐ │ │ │ KCLResult │──┼────────▶│x.YAMLString() │ -│WithOptions │ │ │ │MustRun() │ │ │ └───────────┘ │ └───────────────┘ -│WithOverrides │────┘ │ └───────────┘ │ │ │ -│WithWorkDir │ │ │ │ │ -│WithDisableNone │ │ │ │ │ -└─────────────────┘ └─────────────────┘ └─────────────────┘ -``` - -其中输入的文件包含 KCL 文件和 `setting.yml` 配置文件,`Options` 可以用于指定额外的参数和工作目录等信息。“KCLVM-Go-API”部分是提供的 KCLVM 执行函数,执行函数根据输入文件和额外的参数执行 KCL 程序,最终输出 `KCLResultList` 结果。`KCLResultList` 是一个 `KCLResult` 构成的列表,每个 `KCLResult` 对应一个生成的配置文件或 `map[string]interface{}`。 - -### 2.2. 
例子 - -```go -package main - -import ( - "fmt" - - "kusionstack.io/kclvm-go/api/kcl" -) - - -func main() { - const k_code = ` -import kcl_plugin.hello - -name = "kcl" -age = 1 - -two = hello.add(1, 1) - -schema Person: - name: str = "kcl" - age: int = 1 - -x0 = Person{} -x1 = Person{age:101} -` - - result := kcl.MustRun("hello.k", kcl.WithCode(k_code)).First() - fmt.Println(result.YAMLString()) - - fmt.Println("----") - fmt.Println("x0.name:", result.Get("x0.name")) - fmt.Println("x1.age:", result.Get("x1.age")) - - fmt.Println("----") - - var person struct { - Name string - Age int - } - fmt.Printf("person: %+v\n", result.Get("x1", &person)) -} -``` - -输出结果: - -```yaml -age: 1 -name: kcl -two: 2 -x0: - age: 1 - name: kcl -x1: - age: 101 - name: kcl - ----- -x0.name: kcl -x1.age: 101 ----- -person: &{Name:kcl Age:101} -``` - -## 3. REST-API - -KCL 提供的 C-API 并没有 REST-API,REST-API 是通过 Protobuf 定义,最终由上层的 Go-SDK 提供实现。 - -### 3.1. 启动 REST 服务 - -通过以下方式可以启动 RestAPI 服务: - -- 底层多个 KCLVM 进程: `kcl-go rest-server -http=:2021` -- 底层一个 KCLVM 进程: `kclvm -m kclvm.program.rpc-server -http=:2021` - -然后可以通过 POST 协议请求服务: - -```shell -$ curl -X POST http://127.0.0.1:2021/api:protorpc/BuiltinService.Ping --data '{}' -{ - "error": "", - "result": {} -} -``` - -其中 POST 请求和返回的 JSON 数据和 Protobuf 定义的结构保持一致。 - -### 3.2. `BuiltinService` 服务 - -其中 `/api:protorpc/BuiltinService.Ping` 路径表示 `BuiltinService` 服务的 `Ping` 方法。 - -完整的 `BuiltinService` 由 Protobuf 定义: - -```protobuf -service BuiltinService { - rpc Ping(Ping_Args) returns(Ping_Result); - rpc ListMethod(ListMethod_Args) returns(ListMethod_Result); -} - -message Ping_Args { - string value = 1; -} -message Ping_Result { - string value = 1; -} - -message ListMethod_Args { - // empty -} -message ListMethod_Result { - repeated string method_name_list = 1; -} -``` - -其中 `Ping` 方法可以验证服务是否正常,`ListMethod` 方法可以查询提供的全部服务和函数列表。 - -### 3.3. `KclvmService` 服务 - -`KclvmService` 服务是和 KCLVM 功能相关的服务。用法和 `BuiltinService` 服务一样。 - -比如有以下的 `Person` 结构定义: - -```python -schema Person: - key: str - - check: - "value" in key # 'key' is required and 'key' must contain "value" -``` - -然后希望通过 `Person` 来校验以下的 JSON 数据: - -```json -{"key": "value"} -``` - -可以通过 `KclvmService` 服务的 `ValidateCode` 方法完成。参考 `ValidateCode` 方法的 `ValidateCode_Args` 参数结构: - -```protobuf -message ValidateCode_Args { - string data = 1; - string code = 2; - string schema = 3; - string attribute_name = 4; - string format = 5; -} -``` - -根据 `ValidateCode_Args` 参数结构构造 POST 请求需要的 JSON 数据,其中包含 `Person` 定义和要校验的 JSON 数据: - -```json -{ - "code": "\nschema Person:\n key: str\n\n check:\n \"value\" in key # 'key' is required and 'key' must contain \"value\"\n", - "data": "{\"attr_name\": {\"key\": \"value\"}}" -} -``` - -将该 JSON 数据保存到 `vet-hello.json` 文件,然后通过以下命令进行校验: - -```shell -$ curl -X POST \ - http://127.0.0.1:2021/api:protorpc/KclvmService.ValidateCode \ - -H "accept: application/json" \ - --data @./vet-hello.json -{ - "error": "", - "result": { - "success": true - } -} -``` - -说明校验成功。 - -## 4. Python 语言 - -Python 通过 SWIG 包装 Rust 提供的 C-API。同时提供访问 RestAPI 的客户端。具体细节待完善。 - -## 5. Java 语言 - -Java 通过 Jni 包装 Rust 提供的 C-API。同时提供访问 RestAPI 的客户端。具体细节待完善。 - -## 6. 
其它语言 - -用户也可以基于 C-API 和 RestAPI 包装其它语言的 SDK。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/rest-api.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/rest-api.md deleted file mode 100644 index 80b00fa5..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/reference/lang/xlang-api/rest-api.md +++ /dev/null @@ -1,506 +0,0 @@ ---- -sidebar_position: 2 ---- -# Rest API - -## 1. 启动 REST 服务 - -通过以下方式可以启动 RestAPI 服务: - -- 底层多个 KCLVM 进程: `kcl-go rest-server -http=:2021` -- 底层一个 KCLVM 进程: `kclvm -m kclvm.program.rpc-server -http=:2021` - -然后可以通过 POST 协议请求服务: - -```shell -$ curl -X POST http://127.0.0.1:2021/api:protorpc/BuiltinService.Ping --data '{}' -{ - "error": "", - "result": {} -} -``` - -其中 POST 请求和返回的 JSON 数据和 Protobuf 定义的结构保持一致。 - -## 2. `BuiltinService` 服务 - -其中 `/api:protorpc/BuiltinService.Ping` 路径表示 `BuiltinService` 服务的 `Ping` 方法。 - -完整的 `BuiltinService` 由 Protobuf 定义: - -```protobuf -service BuiltinService { - rpc Ping(Ping_Args) returns(Ping_Result); - rpc ListMethod(ListMethod_Args) returns(ListMethod_Result); -} - -message Ping_Args { - string value = 1; -} -message Ping_Result { - string value = 1; -} - -message ListMethod_Args { - // empty -} -message ListMethod_Result { - repeated string method_name_list = 1; -} -``` - -其中 `Ping` 方法可以验证服务是否正常,`ListMethod` 方法可以查询提供的全部服务和函数列表。 - -## 3. `KclvmService` 服务 - -`KclvmService` 服务是和 KCLVM 功能相关的服务。用法和 `BuiltinService` 服务一样。 - -比如有以下的 `Person` 结构定义: - -```python -schema Person: - key: str - - check: - "value" in key # 'key' is required and 'key' must contain "value" -``` - -然后希望通过 `Person` 来校验以下的 JSON 数据: - -```json -{"key": "value"} -``` - -可以通过 `KclvmService` 服务的 `ValidateCode` 方法完成。参考 `ValidateCode` 方法的 `ValidateCode_Args` 参数结构: - -```protobuf -message ValidateCode_Args { - string data = 1; - string code = 2; - string schema = 3; - string attribute_name = 4; - string format = 5; -} -``` - -根据 `ValidateCode_Args` 参数结构构造 POST 请求需要的 JSON 数据,其中包含 `Person` 定义和要校验的 JSON 数据: - -```json -{ - "code": "\nschema Person:\n key: str\n\n check:\n \"value\" in key # 'key' is required and 'key' must contain \"value\"\n", - "data": "{\"attr_name\": {\"key\": \"value\"}}" -} -``` - -将该 JSON 数据保存到 `vet-hello.json` 文件,然后通过以下命令进行校验: - -```shell -$ curl -X POST \ - http://127.0.0.1:2021/api:protorpc/KclvmService.ValidateCode \ - -H "accept: application/json" \ - --data @./vet-hello.json -{ - "error": "", - "result": { - "success": true - } -} -``` - -说明校验成功。 - -## 4. 完整的 Protobuf 服务定义 - -跨语言的 API 通过 Protobuf 定义([https://github.com/KusionStack/kclvm-go/blob/main/pkg/spec/gpyrpc/gpyrpc.proto](https://github.com/KusionStack/kclvm-go/blob/main/pkg/spec/gpyrpc/gpyrpc.proto)): - -```protobuf -// Copyright 2021 The KCL Authors. All rights reserved. -// -// This file defines the request parameters and return structure of the KCLVM RPC server. -// We can use the following command to start a KCLVM RPC server. 
-// -// ``` -// kclvm -m kclvm.program.rpc-server -http=:2021 -// ``` -// -// The service can then be requested via the POST protocol: -// -// ``` -// $ curl -X POST http://127.0.0.1:2021/api:protorpc/BuiltinService.Ping --data '{}' -// { -// "error": "", -// "result": {} -// } -// ``` - -syntax = "proto3"; - -package gpyrpc; - -option go_package = "kusionstack.io/kclvm-go/pkg/spec/gpyrpc;gpyrpc"; - -import "google/protobuf/any.proto"; -import "google/protobuf/descriptor.proto"; - -// ---------------------------------------------------------------------------- - -// kcl main.k -D name=value -message CmdArgSpec { - string name = 1; - string value = 2; -} - -// kcl main.k -O pkgpath:path.to.field=field_value -message CmdOverrideSpec { - string pkgpath = 1; - string field_path = 2; - string field_value = 3; - string action = 4; -} - -// ---------------------------------------------------------------------------- -// gpyrpc request/response/error types -// ---------------------------------------------------------------------------- - -message RestResponse { - google.protobuf.Any result = 1; - string error = 2; - KclError kcl_err = 3; -} - -message KclError { - string ewcode = 1; // See kclvm/kcl/error/kcl_err_msg.py - string name = 2; - string msg = 3; - repeated KclErrorInfo error_infos = 4; -} - -message KclErrorInfo { - string err_level = 1; - string arg_msg = 2; - string filename = 3; - string src_code = 4; - string line_no = 5; - string col_no = 6; -} - -// ---------------------------------------------------------------------------- -// service requset/response -// ---------------------------------------------------------------------------- - -// gpyrpc.BuiltinService -service BuiltinService { - rpc Ping(Ping_Args) returns(Ping_Result); - rpc ListMethod(ListMethod_Args) returns(ListMethod_Result); -} - -// gpyrpc.KclvmService -service KclvmService { - rpc Ping(Ping_Args) returns(Ping_Result); - - rpc ParseFile_LarkTree(ParseFile_LarkTree_Args) returns(ParseFile_LarkTree_Result); - rpc ParseFile_AST(ParseFile_AST_Args) returns(ParseFile_AST_Result); - rpc ParseProgram_AST(ParseProgram_AST_Args) returns(ParseProgram_AST_Result); - - rpc ExecProgram(ExecProgram_Args) returns(ExecProgram_Result); - - rpc ResetPlugin(ResetPlugin_Args) returns(ResetPlugin_Result); - - rpc FormatCode(FormatCode_Args) returns(FormatCode_Result); - rpc FormatPath(FormatPath_Args) returns(FormatPath_Result); - rpc LintPath(LintPath_Args) returns(LintPath_Result); - rpc OverrideFile(OverrideFile_Args) returns (OverrideFile_Result); - - rpc EvalCode(EvalCode_Args) returns(EvalCode_Result); - rpc ResolveCode(ResolveCode_Args) returns(ResolveCode_Result); - rpc GetSchemaType(GetSchemaType_Args) returns(GetSchemaType_Result); - rpc ValidateCode(ValidateCode_Args) returns(ValidateCode_Result); - rpc SpliceCode(SpliceCode_Args) returns(SpliceCode_Result); - - rpc Complete(Complete_Args) returns(Complete_Result); - rpc GoToDef(GoToDef_Args) returns(GoToDef_Result); - rpc DocumentSymbol(DocumentSymbol_Args) returns(DocumentSymbol_Result); - rpc Hover(Hover_Args) returns(Hover_Result); - - rpc ListDepFiles(ListDepFiles_Args) returns(ListDepFiles_Result); - rpc LoadSettingsFiles(LoadSettingsFiles_Args) returns(LoadSettingsFiles_Result); -} - -message Ping_Args { - string value = 1; -} -message Ping_Result { - string value = 1; -} - -message ListMethod_Args { - // empty -} -message ListMethod_Result { - repeated string method_name_list = 1; -} - -message ParseFile_LarkTree_Args { - string filename = 1; - string source_code = 
2; - bool ignore_file_line = 3; -} -message ParseFile_LarkTree_Result { - string lark_tree_json = 1; -} - -message ParseFile_AST_Args { - string filename = 1; - string source_code = 2; -} -message ParseFile_AST_Result { - string ast_json = 1; // json value -} - -message ParseProgram_AST_Args { - repeated string k_filename_list = 1; -} -message ParseProgram_AST_Result { - string ast_json = 1; // json value -} - -message ExecProgram_Args { - string work_dir = 1; - - repeated string k_filename_list = 2; - repeated string k_code_list = 3; - - repeated CmdArgSpec args = 4; - repeated CmdOverrideSpec overrides = 5; - - bool disable_yaml_result = 6; - - bool print_override_ast = 7; - - // -r --strict-range-check - bool strict_range_check = 8; - - // -n --disable-none - bool disable_none = 9; - // -v --verbose - int32 verbose = 10; - - // -d --debug - int32 debug = 11; -} -message ExecProgram_Result { - string json_result = 1; - string yaml_result = 2; - - string escaped_time = 101; -} - -message ResetPlugin_Args { - string plugin_root = 1; -} -message ResetPlugin_Result { - // empty -} - -message FormatCode_Args { - string source = 1; -} - -message FormatCode_Result { - bytes formatted = 1; -} - -message FormatPath_Args { - string path = 1; -} - -message FormatPath_Result { - repeated string changedPaths = 1; -} - -message LintPath_Args { - string path = 1; -} - -message LintPath_Result { - repeated string results = 1; -} - -message OverrideFile_Args { - string file = 1; - repeated string specs = 2; -} - -message OverrideFile_Result { - bool result = 1; -} - -message EvalCode_Args { - string code = 1; -} -message EvalCode_Result { - string json_result = 2; -} - -message ResolveCode_Args { - string code = 1; -} - -message ResolveCode_Result { - bool success = 1; -} - -message GetSchemaType_Args { - string file = 1; - string code = 2; - string schema_name = 3; // emtry is all -} -message GetSchemaType_Result { - repeated KclType schema_type_list = 1; -} - -message ValidateCode_Args { - string data = 1; - string code = 2; - string schema = 3; - string attribute_name = 4; - string format = 5; -} - -message ValidateCode_Result { - bool success = 1; - string err_message = 2; -} - -message CodeSnippet { - string schema = 1; - string rule = 2; -} - -message SpliceCode_Args { - repeated CodeSnippet codeSnippets = 1; -} - -message SpliceCode_Result { - string spliceCode = 1; -} - -message Position { - int64 line = 1; - int64 column = 2; - string filename = 3; -} - -message Complete_Args { - Position pos = 1; - string name = 2; - string code = 3; -} - -message Complete_Result { - string completeItems = 1; -} - -message GoToDef_Args { - Position pos = 1; - string code = 2; -} - -message GoToDef_Result { - string locations = 1; -} - -message DocumentSymbol_Args { - string file = 1; - string code = 2; -} - -message DocumentSymbol_Result { - string symbol = 1; -} - -message Hover_Args { - Position pos = 1; - string code = 2; -} - -message Hover_Result { - string hoverResult = 1; -} - -message ListDepFiles_Args { - string work_dir = 1; - bool use_abs_path = 2; - bool include_all = 3; - bool use_fast_parser = 4; -} - -message ListDepFiles_Result { - string pkgroot = 1; - string pkgpath = 2; - repeated string files = 3; -} - -// --------------------------------------------------------------------------------- -// LoadSettingsFiles API -// Input work dir and setting files and return the merged kcl singleton config. 
-// --------------------------------------------------------------------------------- - -message LoadSettingsFiles_Args { - string work_dir = 1; - repeated string files = 2; -} - -message LoadSettingsFiles_Result { - CliConfig kcl_cli_configs = 1; - repeated KeyValuePair kcl_options = 2; -} - -message CliConfig { - repeated string files = 1; - string output = 2; - repeated string overrides = 3; - repeated string path_selector = 4; - bool strict_range_check = 5; - bool disable_none = 6; - int64 verbose = 7; - bool debug = 8; -} - -message KeyValuePair { - string key = 1; - string value = 2; -} - -// ---------------------------------------------------------------------------- -// JSON Schema Lit -// ---------------------------------------------------------------------------- - -message KclType { - string type = 1; // schema, dict, list, str, int, float, bool, null, type_string - repeated KclType union_types = 2 ; // union types - string default = 3; // default value - - string schema_name = 4; // schema name - string schema_doc = 5; // schema doc - map properties = 6; // schema properties - repeated string required = 7; // required schema properties, [property_name1, property_name2] - - KclType key = 8; // dict key type - KclType item = 9; // dict/list item type - - int32 line = 10; - - repeated Decorator decorators = 11; // schema decorators -} - -message Decorator { - string name = 1; - repeated string arguments = 2; - map keywords = 3; -} - -// ---------------------------------------------------------------------------- -// END -// ---------------------------------------------------------------------------- -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/_category_.json deleted file mode 100644 index a072fd9e..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "架构 & 概念", - "position": 3 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/arch.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/arch.md deleted file mode 100644 index d591a007..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/arch.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -sidebar_position: 4 ---- - -# Kusion 整体架构 - -架构图本质上反应的是 Kusion 的工作流程和工作机制。熟悉了整体架构图之后,同样能够做到纲举目张为 Kusion 的日常使用和开发提供指导。 - -## 1. 架构简图 - -先从架构简图开始,其中涉及 App 和基础设施如何通过 Kusion 技术栈联通目标平台。如下图所示: - -![](/img/docs/user_docs/concepts/iac-arch-01.png) - - -App 和 Infra 的开发人员完整最基础的功能和配置开发,SRE 则基于基础的功能通过完成运维配置代码化工作,然后通过 Kusion 技术栈将运维配置代码转化为不同目标平台的具体操作。 - -## 2. 架构大图 - -在架构简图的之上,将 Kusion 面对的具体场景和特性填充到各个子模块中的得到了架构大图。Kusion 开源可编程协议栈处于云原生应用到 K8S 平台中间层,上面承接基础设施运维和 App 业务功能、下面对接 K8S 等云平台。Kusion 的架构大图如下: - -![](/img/docs/user_docs/concepts/iac-arch-02.png) - -Kusion 可编程协议栈内部又分为用户界面、核心能力、配置语言等部分。其中用户界面主要面试上层的云原生应用的用户,通过 Kusion 提供的 Konfig 大库自动集成 Pipeline、Operation 操作界面、VSCode 的插件和相关的命令行工具等,和内部的 Kusion 模型库、IaC 引擎和 Kusion 服务等进行能力对接。最下面的是 KCL 配置策略语言,以及和 KCL 语言相关的语言插件、其他高级语言的 SDK 和其他 OpenAPI 风格的配置数据对接等,语言和协议层为上面的 Konfig 大库提供编程能力、为 IaC 引擎提供可被编程的 SDK 能力等。 - -## 3. 
和上游下游的关系 - -![](/img/docs/user_docs/concepts/kusion-connect-x-01.png) - -Kusion 属于云原生运维、基础设施管理、领域编程语言及编译器开源领域。Kusion 涉及的上下游相关项目有:Kubernetes 自身及生态技术 -、Kubenetes 运维自动化技术、GitOps CICD 系统、IaaS 管理技术及各个云厂商和 IaC 领域语言及数据格式等。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/index.md deleted file mode 100644 index ae5bf028..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# 架构 & 概念 - -在直观体验过 Kusion 如何快速部署 Kubernetes 官方的 Guestbook 例子后,可以更深入理解 Kusion 可编程配置技术栈背后的架构和概念。其中用户界面展示了 Kusion 技术栈用户使用界面,Project & Stack 介绍了 Kusion 推荐的项目组织结构,State & Backends 则介绍了 Kusion 背后的状态管理和厚度存储设计。最后介绍了 Kusion 架构和 KCL 语言的特色和设计思路。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/kcl-lang.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/kcl-lang.md deleted file mode 100644 index 04f14259..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/kcl-lang.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -sidebar_position: 4 ---- - -# KCL - -[KCL](https://github.com/KusionStack/KCLVM) 是一个开源的基于约束的记录及函数语言。KCL 通过成熟的编程语言技术和实践来改进对大量繁杂配置的编写,致力于构建围绕配置的更好的模块化、扩展性和稳定性,更简单的逻辑编写,以及更快的自动化集成和良好的生态延展性。 - -## 特性 - -![](/img/docs/user_docs/intro/kcl.png) - -+ **简单易用**:源于 Python、Golang 等高级语言,采纳函数式编程语言特性,低副作用 -+ **设计良好**:独立的 Spec 驱动的语法、语义、运行时和系统库设计 -+ **快速建模**:以 [Schema](https://kusionstack.io/docs/reference/lang/lang/tour#schema) 为中心的配置类型及模块化抽象 -+ **功能完备**:基于 [Config](https://kusionstack.io/docs/reference/lang/lang/codelab/simple)、[Schema](https://kusionstack.io/docs/reference/lang/lang/tour/#schema)、[Lambda](https://kusionstack.io/docs/reference/lang/lang/tour/#function)、[Rule](https://kusionstack.io/docs/reference/lang/lang/tour/#rule) 的配置及其模型、逻辑和策略编写 -+ **可靠稳定**:依赖[静态类型系统](https://kusionstack.io/docs/reference/lang/lang/tour/#type-system)、[约束](https://kusionstack.io/docs/reference/lang/lang/tour/#validation)和[自定义规则](https://kusionstack.io/docs/reference/lang/lang/tour#rule)的配置稳定性 -+ **强可扩展**:通过独立配置块[自动合并机制](https://kusionstack.io/docs/reference/lang/lang/tour/#-operators-1)保证配置编写的高可扩展性 -+ **易自动化**:[CRUD APIs](https://kusionstack.io/docs/reference/lang/lang/tour/#kcl-cli-variable-override),[多语言 SDK](https://kusionstack.io/docs/reference/lang/xlang-api/overview),[语言插件](https://github.com/KusionStack/kcl-plugin) 构成的梯度自动化方案 -+ **极致性能**:使用 Rust & C,[LLVM](https://llvm.org/) 实现,支持编译到本地代码和 [WASM](https://webassembly.org/) 的高性能编译时和运行时 -+ **API 亲和**:原生支持 [OpenAPI](https://github.com/KusionStack/kcl-openapi)、 Kubernetes CRD, Kubernetes YAML 等 API 生态规范 -+ **开发友好**:[语言工具](https://kusionstack.io/docs/reference/cli/kcl/) (Format,Lint,Test,Vet,Doc 等)、 [IDE 插件](https://github.com/KusionStack/vscode-kcl) 构建良好的研发体验 -+ **安全可控**:面向领域,不原生提供线程、IO 等系统级功能,低噪音,低安全风险,易维护,易治理 -+ **生产可用**:广泛应用在蚂蚁集团平台工程及自动化的生产环境实践中 - -## 场景 - -您可以将 KCL 用于 - -+ 生成静态配置数据如 JSON, YAML 等 -+ 使用 schema 对配置数据进行建模并减少配置数据中的样板文件 -+ 为配置数据定义带有规则约束的 schema 并对数据进行自动验证 -+ 无副作用地组织、简化、统一和管理庞大的配置 -+ 通过分块编写配置数据可扩展地管理庞大的配置 -+ 与 [Kusion Stack](https://kusionstack.io) 一起,用作平台工程语言来交付现代应用程序 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/project-stack.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/project-stack.md deleted file mode 100644 index 51d9f606..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/project-stack.md +++ /dev/null @@ -1,60 +0,0 @@ ---- 
-sidebar_position: 2 ---- - -# Project & Stack - -为了应对规模化运维面临的配置、团队多维度爆炸的挑战,Kusion 采用业界通用的思路通过引入 Project & Stack 一层抽象来规范和简化化配置代码的管理。它们是 Kusion 抽象的项目组织结构,项目采用 Project 概念抽象,其中对应多个 Stack 为可以独立部署、单独配置的最小 KCL 程序单元。二者关系如下图: - -![](/img/docs/user_docs/concepts/konfig.png) - -其中的 Base 比较特殊,对应 Project 中通用的基线配置,用于不同的 Stack 共享(并非独立部署单元)。 - -## 1. Project - -任何一个直接包含 project.yaml 的文件夹被称为一个 Project,project.yaml 用于说明该 project 的元数据信息。一个 Project 由一个或多个互相关联的 Stack 组成。Project 往往具有明确的业务语意,用户可以把一个应用映射成一个 Project,也可以把一种运维场景映射为一个 Project,比如建站运维等。一个 Project 必然属于一个租户。 - -## 2. Stack - -Stack 是一组可以独立部署、单独配置的 KCL 程序,是可以部署为一组基础设施实体的最小配置单元。一个 Stack 的所有专用配置文件需要位于同一文件夹内,该文件夹需要位于其所属 Project 的路径下,并且直接包含 stack.yaml。stack.yaml 用于说明该 Stack 的元数据信息,并且可以作为识别一个 Stack 的标识。Stack 往往表示 CI/CD 流程中的不同阶段,如 dev、gray、prod 等,或者表示一个大项目中不同的小项目。Kusion 提供 Stack 级别的权限控制。 - -## 3. Project 和 Stack 的关系 - -一个 Project 包含一个或多个 Stack,而一个 Stack 必须属于一个 Project 并且只能属于一个 Project。用户可以根据自身需求解释 Project 和 Stack 的含义,灵活地组织两者之间的结构和关系。在跨团队协作、云原生的场景下,我们提供了如下 3 种 Project 和 Stack 关系组织的最佳实践: - -1. Enviroment 模式:Project 以一个整体存在,Stack 表示 Project 的不同环境,比如 dev、pre 和 prod 等,这些环境通常与 CI/CD 流程中的不同阶段相对应。Stack 具有 Project 需要的全部配置,只是对应部署的环境不同。 -2. Micro-Project 模式:与微服务类似,Project 由多个较小的 Project 组成,比如一个 Project 需要的基础设施可能包括 Kubernetes 对象、数据库实体、监控实体等,这些基础设施实体均可作为一个较小的 Project 进行单独配置。我们将这些的较小的 Project 称为 Micro-Project,并通过 Stack 进行表示。Stack 仅具有 Project 需要的部分配置。 -3. Hybrid 模式:Hybrid 模式是 Enviroment 模式与 Micro-Project 模式的组合,Stack 不仅可以表示 Project 部署的不同环境,也可以表示 Micro-Project。一般而言,具有 Micro-Project 特性的 Stack 可以和某个具有 Enviroment 特性的 Stack 共同提供 Project 在该环境下的全部配置。具有 Micro-Project 特性的 Stack 往往需要在 Project 的不同环境间进行复用。具有 Enviroment 特性的 Stack 可能也不包含 Project 的需要全部配置。 - -**Tip:** 在 Hybrid 模式中,具有 Micro-Project 特性的 Stack 和 base 文件夹均可提供不同环境中的通用能力,两者存在如下两点不同。1、具有 Micro-Project 特性的 Stack 可以进行编译、部署成一个基础设施实体,而 base 无法和一个基础设施实体进行对应;2、具有 Micro-Project 特性的 Stack 表示一个较小 Project 的配置,而 base 表示多个环境和 Micro-Project 的通用配置。总的来说,只需记住具有 Micro-Project 特性的 Stack 仍是一个 Stack,具备 Stack 的特性;而 base 文件夹只是通用 KCL 程序的集合,并不具备 Stack 的特性。 - -## 4. Project 和 Stack 的工程结构 - -用户可以灵活地组织 Project 和 Stack 的工程结构,只需遵循如下 2 个规则: - -1. Stack 目录必须位于 Project 目录下,但不要求 Stack 目录是 Project 目录的下一层; -2. Project 之间和 Stack 之间不可嵌套,即不允许一个 Project 目录下有另一个 Project 目录,一个 Stack 目录下有另一个 Stack 目录。 - -我们推荐用户把 Project 和 Stack 名称作为对应的文件夹名称,但这不是必须的。用户可以根据自身需求对 Project 和 Stack 进行分类,组织对应的目录结构。一种常见的目录结构遵循 [康威定律](https://zh.wikipedia.org/wiki/%E5%BA%B7%E5%A8%81%E5%AE%9A%E5%BE%8B):`Project_Type/Orgnization_Name/Project_Name/Stack_Name` 项目的类型、组织结构的类型和代码保存相似的映射关系。 - -## 5. 
例子:Nginx - -以 Konfig 中自带的 [appops/nginx-example](https://github.com/KusionStack/konfig/tree/master/appops/nginx-example) 为例,下面是对应的目录和文件结构: - -```bash -appops/nginx-example -├── README.md # Project 介绍文件 -├── base # 各环境通用配置 -│ └── base.k # 通用 KCL 配置 -├── dev # 环境特有配置 -│ ├── ci-test # 测试目录 -│ │ ├── settings.yaml # 测试数据 -│ │ └── stdout.golden.yaml # 测试期望结果 -│ ├── kcl.yaml # 多文件编译配置,是 KCL 编译的入口 -│ ├── main.k # 当前环境 KCL 配置 -│ └── stack.yaml # Stack 配置文件 -└── project.yaml # Project 配置文件 -``` - -根目录中有 `project.yaml` 文件表示对应一个 Project,然后 `dev/stack.yaml` 表示对应 Stack,base 目录对应基线配置(不是一个 Stack)。`dev/ci-test` 是测试相关配置和数据,`kcl.yaml` 和 `main.k` 是应用的 KCL 配置程序代码。 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/state-backend.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/state-backend.md deleted file mode 100644 index 75f52892..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/state-backend.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -sidebar_position: 3 ---- - -# State & Backends - -Kusion 工具的核心之一是一个结构化配置数据的状态按需执行框架引擎,其前端通过 KCL 开发的配置产出 YAML 等价的配置数据,然后通过可热插拔的后端执行引擎将 YAML 配置数据按需应用到不同的云原生平台。 - -![](/img/docs/user_docs/concepts/kusion-arch-01.png) - -## 1. State - -State 用于存储资源下发后 Backend 驱动返回的状态,是对于 IaaS 状态的映射。用户执行 Apply 命令时,Kusion 会根据 Konfig 与 State 之间的差异,来按需执行资源创建、更新或删除操作。用户执行 Apply 命令后,Kusion 会根据 Backend 返回的信息更新 state 状态。 - -Kusion state 默认存储于"kusion_state.json"文件中,也可存储于远程后端,便于团队协作。 - -## 2. Backends - -Kusion state 表示存储 Backend 状态的存储配置,默认情况下使用 Local 类型表示的本地磁盘存储状态。对于团队协作项目,State 可存储在远程服务上供多人共享使用。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/user-interface.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/user-interface.md deleted file mode 100644 index da55b84c..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/concepts/user-interface.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -sidebar_position: 1 ---- - -# 用户界面 - -## 1. 单 App 应用界面 - -Kusion 用户界面并不是仅仅指传统的网页控制台界面,它是用户在使用 Kusion 过程中直接感知的能力和功能界面。最简单的 Kusion 用户界面就是 [KusionCtl](/docs/user_docs/support/faq-concepts#5-kusionctl) 命令行工具。 - -![](/img/docs/user_docs/concepts/kusion-user-interface-01.png) - -以上只是针对单 App 应用这种简单场景的关系图,用户只面对一个 Kusion 命令行工具。 - -## 2. 规模化 App 应用界面 - -Kusion 最初是针对规模化 App 应用运维而开发的可编程配置技术栈。为了开发效率,Kusion 同时为 kusionctl 命令行提供了配套和的提效工具:KCL 命令、DevOps 工具、IDE 插件等。此外为了和开发系统集成,Kusion 同时也可针对 Github Action、Gitlab Action 和 ArgoCD 等系统配置对应的能力。因此更详细的 Kusion 用户界面如下图所示: - -![](/img/docs/user_docs/concepts/kusion-user-interface-02.png) - -现在 Kusion 用户界面对应其中向左倒下的大写 L 形状的界面,包含本地工具和各种 CICD 集成的界面等。 - -## 3. 不同角色和场景组合 - -在规模化运维中,经常需要涉及 App 和基础设施两个不同的应用场景,同时涉及开发者和 SRE 两种不同的角色,两种角色和两种场景产生的多种组合。不同角色的用户针对不同的场景分别开发 App Ops Config 和 Infra Ops Config,并统一到 Konfig 大库中进行开发和版本管理。这些配置最终构成了规模化运维涉及到各种配置代码,它们通过 Kusion 用户界面开发、测试,并最终通过各种 CICD 系统自动化 Apply 到最终目标平台。 - -## 4. 
本地工具和生态集成 - -KCL 命令行工具、DevOps 工具和 IDE 插件等本地工具用于提供原子的能力,同时方便本地开发工作。开发完成之后通过 Konfig 大库合并代码时触发各种 CICD 系统自动化 Apply 到最终目标平台。 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/_category_.json deleted file mode 100644 index c826d665..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "快速开始", - "position": 1 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/_konfig.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/_konfig.md deleted file mode 100644 index 5209a51d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/_konfig.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -sidebar_position: 9 ---- - -# Kusion 模型库 - -**Kusion 模型库**也叫做 `Kusion Model`,是 KusionStack 中预置的、用 KCL 描述的配置模型,它提供给用户开箱即用、高度抽象的配置接口,模型库最初朴素的出发点就是改善 YAML 用户的效率和体验,我们希望通过将代码更繁杂的模型抽象封装到统一的模型中,从而简化用户侧配置代码的编写。 - -Konfig 仓库地址:https://github.com/KusionStack/konfig - -![](/img/docs/user_docs/getting-started/konfig-arch-01.png) - -## 1. 目录结构 - -先克隆 Kusion 模型库:`git clone git@github.com:KusionStack/Konfig.git`。 - -Konfig 配置大库整体结构如下: - -```bash -. -├── Makefile # 通过 Makefile 封装常用命令 -├── README.md # 配置大库说明 -├── appops # 应用运维目录,用来放置所有应用的 KCL 运维配置 -│ ├── guestbook-frontend -│ ├── http-echo -│ └── nginx-example -├── base # Kusion Model 模型库 -│ ├── examples # Kusion Model 样例代码 -│ │ ├── monitoring # 监控配置样例 -│ │ ├── native # Kubernetes 资源配置样例 -│ │ └── server # 云原生应用运维配置模型样例 -│ └── pkg -│ ├── kusion_kubernetes # Kubernetes 底层模型库 -│ ├── kusion_models # 核心模型库 -│ └── kusion_prometheus # Prometheus 底层模型库 -├── hack # 放置一些脚本 -└── kcl.mod # 大库配置文件,通常用来标识大库根目录位置以及大库所需依赖 -``` - -## 2. 测试 Konfig 代码 - -在安装完成 Kusion 工具之后,在 Konfig 根目录执行 `make check-all` 测试大库全部应用(参考 [Konfig](/docs/user_docs/concepts/konfig)),或者执行 `make check-http-echo` 测试 `appops/http-echo` 应用。 - -如果需要单独测试 `appops/http-echo` 应用的 dev 版本,可以进入 `appops/http-echo/dev` 目录执行 `kusion compile` 命令(或者通过更底层的 `kcl -Y kcl.yaml ./ci-test/settings.yaml` 命令),输出的文件在 `appops/http-echo/dev/ci-test/stdout.golden.yaml` 文件。 - -## 3. 添加应用 - -在 [快速开始/Usecase](/docs/user_docs/getting-started/usecase) 我们已经展示如何快速添加一个应用(参考 [Konfig](/docs/user_docs/concepts/konfig))。 - -## 4. 
Konfig 架构图 - -之所以用一个大的仓库管理全部的 IaC 配置代码,是由于不同代码包的研发主体不同,会引发出包管理和版本管理的问题,从而导致平台侧需要支持类似编译平台的能力。采用大库模式下,业务配置代码、基础配置代码在一个大库中,因此代码间的版本依赖管理比较简单,平台侧处理也比较简单,定位唯一代码库的目录及文件即可,代码互通,统一管理,便于查找、修改、维护(大库模式也是 Google 等头部互联网公司内部实践的模式)。 - -下面是 Konfig 的架构图: - -![](/img/docs/user_docs/getting-started/konfig-arch-01.png) - -核心模型内部通过前端模型和后端模型两层抽象简化前端用户的配置代码,底层模型则是通过 [KCL OpenAPI](/docs/reference/cli/openapi) 工具自动生成。 - -模型的更详细文档可参考 [参考手册/Kusion 模型库](/docs/reference/model)。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/getting-started.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/getting-started.md deleted file mode 100644 index 40a61e15..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/getting-started.md +++ /dev/null @@ -1,3 +0,0 @@ -# 快速开始 - -快速开始以最快流程对 [Kusion](/docs/user_docs/intro/kusion-intro) 有一个简单直观的体验,其中包含 Kusion 的快速安装、KCL 配置语言的速览、Kusion 模型库和如何通过 Kusion 工具快速部署 [Kubernetes](https://kubernetes.io) 官方的 [Guestbook](https://kubernetes.io/zh/docs/tutorials/stateless-application/guestbook) 例子。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/_category_.json deleted file mode 100644 index bc165fce..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "下载和安装", - "position": 1 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/docker.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/docker.md deleted file mode 100644 index 938d3cf6..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/docker.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Kusion 的 Docker 镜像 - -如果环境无法安装本地执行版本,可以选择 KusionStack 的 Docker 版本。首先在本地安装好 [Docker](https://www.docker.com/) 环境,并启动 Docker 服务。然后通过 `docker info` 命令验证本地的 Docker 服务已经正常启动。 - -Kusion 镜像的网址: https://hub.docker.com/r/kusionstack/kusion - -## 1. 拉取最新版本 - -通过以下命令拉取最新的版本: - -```shell -$ docker pull kusionstack/kusion -Using default tag: latest -latest: Pulling from kusion/kusion -... -kusionstack/kusion:latest -$ -``` - -然后通过以下命令查看 KCL 版本号: - -```shell -$ docker run --rm -it kusionstack/kusion kcl --version -kclvm version is 0.4.1; checksum: *** -$ -``` - -## 2. 拉取指定版本 - -查看[镜像版本号列表](https://hub.docker.com/r/kusionstack/kusion/tags),或者通过以下命令拉取 kusion 最新镜像(Kusion镜像中包含KCL命令行工具): - -```shell -$ docker pull kusionstack/kusion -... -``` - -然后通过以下命令查看 KCL 版本号: - -```shell -$ docker run --rm -it kusionstack/kusion:v0.4.1 kcl --version -kclvm version is 0.4.1 -$ -``` - -## 3. 
执行 KCL 代码 - -如果要验证执行 KCL 程序,可以先在当前目录创建一个 `hello.k` 文件,内容如下: - -```python -hello = "world" -``` - -然后通过以下命令执行 `hello.k` 文件: - -```shell -$ docker run --rm -it -v `pwd`:/root/hello.k kusionstack/kusion kcl /root/hello.k -hello: world -$ -``` - -程序输出的YAML格式的 `hello: world` 数据,说明Docker环境的KCL已经可以正常工作了。 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/index.md deleted file mode 100644 index 48326e4d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/index.md +++ /dev/null @@ -1,4 +0,0 @@ -# 下载和安装 - -可以通过 Kusionup 工具安装和管理本地 KusionStack 版本,也可以通过 KusionStack 的 Docker 镜像使用。 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/kusionup.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/kusionup.md deleted file mode 100644 index 879d8fde..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/install/kusionup.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -sidebar_position: 1 ---- - -# Kusionup 安装 - -推荐通过 kusionup 工具安装 Kusion,这是一个优雅的 kusion 多版本管理工具,你可以通过它: - -- 一键安装任何版本的 kusion -- 灵活切换不同版本的 kusion -- 自定义本地 kusion 版本 - -## 1. 安装 Kusionup - -通过以下命令一键安装 `kusionup`: - -```bash -brew install KusionStack/tap/kusionup && kusionup init --skip-prompt && source $HOME/.kusionup/env -``` - -升级 `kusionup`: -```bash -brew update -brew upgrade KusionStack/tap/kusionup -``` - -该脚本执行后会创建 `$HOME/.kusionup` 目录,该目录下会包含: - -- `$HOME/.kusionup/bin` 目录用于放置 `kusionup` 二进制工具 -- `$HOME/.kusionup/env` 文件用于声明 `kusionup` 和 `kusion` 技术栈所需要的环境变量 -- `$HOME/.kusionup/current` 软链接用于标识当前激活的 `kusion` 版本 -- `$HOME/.kusionup/$VERSION` 目录代表不同的 `kusion` 版本目录,比如默认安装的最新版本 `$HOME/.kusionup/latest` - -**💡 自定义默认安装版本**: -上述脚本会默认安装最新的 kusion 版本(`latest`),如果想**自定义默认安装版本**,可以运行下述命令(将最后的 `latest` 替换为你想要默认安装的版本号就就行): - -```bash -brew install KusionStack/tap/kusionup && kusionup init --skip-install && source $HOME/.kusionup/env && kusionup reinstall latest -``` - -**💡 安装失败问题排查**: - -**❓ 问题 1**:M1 Mac Openssl dylib 库找不到或 SSL module is not available 的问题 - -1. 确保你有一个 homebrew arm64e-version 安装在 /opt/homebrew, 否则通过如下命令安装 arm 版本的 brew - -```python -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -# 添加到 path -export PATH=/opt/homebrew/bin:$PATH -``` - -2. 
通过 brew 安装 openssl 1.1 版本 - -```python -brew install openssl@1.1 -``` - -**❓ 问题 2**:mac KCLVM gettext dylib 找不到的问题 -![image.png](https://intranetproxy.alipay.com/skylark/lark/0/2022/png/317257/1646538731635-b1e290a5-465d-4838-b8d1-7f22cb48e267.png#clientId=uc50abf48-5ee8-4&crop=0&crop=0&crop=1&crop=1&from=paste&height=200&id=ub5ce78d1&margin=%5Bobject%20Object%5D&name=image.png&originHeight=400&originWidth=1158&originalType=binary&ratio=1&rotation=0&showTitle=false&size=238920&status=done&style=none&taskId=ue75303e6-140d-450f-84de-464da45a473&title=&width=579) - -- 使用 which 命令找到自己 gettext 的位置 (假设为/Users/yueyi/tools/homebrew/bin/gettext) - -```python -which gettext -``` - -- 使用 otool -L 命令获得 libintl.8.dylib 的位置 - -```python -C02Y90Q4JHD2:bin yueyi$ otool -L /Users/yueyi/tools/homebrew/bin/gettext -/Users/yueyi/tools/homebrew/bin/gettext: - /System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation (compatibility version 150.0.0, current version 1675.129.0) - /Users/yueyi/tools/homebrew/Cellar/gettext/0.21/lib/libintl.8.dylib (compatibility version 11.0.0, current version 11.0.0) - /usr/lib/libiconv.2.dylib (compatibility version 7.0.0, current version 7.0.0) - /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1281.100.1) -``` - -- 将 `/Users/yueyi/tools/homebrew/Cellar/gettext/0.21/lib/libintl.8.dylib` 拷贝到 `/usr/local/opt/gettext/lib/libintl.8.dylib` - -## 2. 管理 Kusion 版本 - -执行完安装脚本后,默认已经安装好了 `kusionup` 和一个默认的 `kusion` 版本,你可以通过执行 `kusionup` 查看已安装的所有版本: - -```bash -$ kusionup -Use the arrow keys to navigate: ↓ ↑ → ← -? Select a version: - ▸ latest -``` - -执行 `kusionup ls-ver` 列出来自内置安装源中的**所有可安装的 **`**kusion**`** 版本**: - -```bash -$ kusionup ls-ver -cdn@latest -cdn@v0.4.2 -cdn@v0.4.1 -github@latest -github@v0.4.2 -github@v0.4.1 -``` - -执行 `kusionup install $VERSION` 安装指定版本: - -```shell -# 这里假设从 cdn 加速源安装 kusion 的最新版本 ↓ -$ kusionup install cdn@latest -Downloaded 0.0% ( 2426 / 139988826 bytes) ... -Downloaded 11.4% ( 16003466 / 139988826 bytes) ... -Downloaded 21.0% ( 29433014 / 139988826 bytes) ... -Downloaded 32.2% ( 45077686 / 139988826 bytes) ... -Downloaded 41.9% ( 58642898 / 139988826 bytes) ... -Downloaded 51.2% ( 71647010 / 139988826 bytes) ... -Downloaded 61.6% ( 86258486 / 139988826 bytes) ... -Downloaded 71.2% ( 99667706 / 139988826 bytes) ... -Downloaded 81.5% (114078806 / 139988826 bytes) ... -Downloaded 91.5% (128134166 / 139988826 bytes) ... -Downloaded 100.0% (139988826 / 139988826 bytes) -INFO[0055] Unpacking /root/.kusionup/kusion@latest/kusion-linux.tgz ... -INFO[0061] Success: latest downloaded in /root/.kusionup/kusion@latest -INFO[0061] Default Kusion is set to 'latest' - -$ kusion version -releaseVersion: v0.4.1 -...... -``` - -执行 `kusionup show` 查看目前正在使用的 kusion 版本: - -```bash -$ kusionup show -| VERSION | ACTIVE | -|---------------|--------| -| cdn@latest | * | -| cdn@v0.4.1 | | -``` - -执行 `kusionup remove $VERSION` 删除指定版本: - -```bash -# 这里假设删除 kusion 的最新版本 ↓ -$ kusionup remove latest -INFO[0000] Removing latest - -$ kusionup -Use the arrow keys to navigate: ↓ ↑ → ← -? Select a version: - ▸ cdn@v0.4.1 # 已经没有 latest 的选项了 -``` - -## 3. 
Kusionup 帮助文档 - -```bash -$ kusionup -h -The Kusion installer - -Usage: - kusionup [flags] - kusionup [command] - -Available Commands: - default Set the default Kusion version - help Help about any command - install Install Kusion with a version - ls-ver List Kusion versions to install - remove Remove Kusion with a version - show Show installed Kusion - version Show kusionup version - -Flags: - -h, --help help for kusionup - -v, --verbose Verbose - -Use "kusionup [command] --help" for more information about a command. -``` - -## 4. Kusionup Tips - -**将本地任何 kusion 版本加入到 kusionup 切换列表中?** - -该方法可以将本地的任何一个版本的 `.kusion` 目录加入到 `kusionup` 的切换列表中,可用于调试,使用更加灵活。 - -```bash -# 假设本地有个调试版本的 kusion 放在 $HOME/.kusion 目录中 -# 执行下面的命令后可以将它加入到 kusionup 切换列表中 - -$ mv $HOME/.kusion $HOME/.kusionup/kusion-debug - -# 接下来就可以通过 kusionup 切换到 debug 版本了 -$ kusionup -Use the arrow keys to navigate: ↓ ↑ → ← -? Select a version: - cdn@latest - ▸ debug -``` - -**注意**:`.kusionup` 下的 `kusion` 目录必须以 `kusion-` 为前缀,后缀是版本号; - -## 5. VS Code 插件 - -为了提高 IDE 开发 KCL 的效率,Kusion 为 VS Code 在线版和本地版本提供了插件支持。在线版本可以从 https://vscode.dev 地址打开,然后安装“KCL for vscode.dev 插件”,效果如下: - -![](/img/docs/user_docs/getting-started/install/ide-vscode.png) - -本地 VS Code 可以安装完整的 [KCL 插件](https://marketplace.visualstudio.com/items?itemName=kcl.kcl-vscode-extension),提供了高亮、自动补全(部分:关键字补全等)、跳转、悬停、大纲等功能。插件虽然不是 Kusion 必须的部分,但是可以提高效率推荐安装。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/kcl.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/kcl.md deleted file mode 100644 index b1ffc8f2..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/kcl.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -sidebar_position: 3 ---- - -# KCL 语言速览 - -KCL(Kusion Configuration Language)是 Kusion 内置的面相云原生领域配置策略语言。KCL 设计之初受 Python3 启发,同时吸收了声明式、OOP 编程范式的理念设计等设计,是一种专用于配置策略定义、校验的静态强类型的面相配置和策略场景的语言。本节我们将快速展示 KCL 语言的基本特性。 - -## 1. Hello KCL - -学习新语言的最佳途径是自己亲手写几个小程序,配置语言也是如此。KCL 作为一种配置策略语言,我们可以像写配置一样写 KCL 程序。 - -下面是一个简单的 `hello.k` 程序: - -```python -hello = "KCL" -``` - -将 `hello` 属性设置为 `"KCL"` 字符串。然后将代码保存到 `hello.k` 文件中。 - -如何执行这个程序取决于具体的开发环境,我们先假设本地的 macOS 或者是 Linux 系统已经安装了 `kcl` 命令(或者通过 `docker run --rm -it kusionstack/kusion bash` 进入 Docker 环境测试)。然后在文件所在的目录命令行输入以下命令执行: - -```shell -$ kcl hello.k -hello: KCL -``` - -命令行执行的效果如图所示: - -![](/img/docs/user_docs/getting-started/hello.gif) - -输出的是 YAML 格式的配置数据。这个程序虽然简单,但是我们可以通过执行 KCL 配置程序到输出结果验证了开发环境和 `kcl` 命令行的基本用法。 - -## 2. 
再复杂一点的配置 - -常见的配置数据除了的普通的 key-value 对,还有嵌套的字典和列表类型,同时 value 基础类型除了字符串还有布尔和数值等类型。下面是更为复杂一点的 `server.k` 配置: - -```python -# This is a KCL document - -title = "KCL Example" - -owner = { - name = "The KCL Authors" - data = "2020-01-02T03:04:05" -} - -database = { - enabled = True - ports = [8000, 8001, 8002] - data = [["delta", "phi"], [3.14]] - temp_targets = {cpu = 79.5, case = 72.0} -} - -servers = [ - {ip = "10.0.0.1", role = "frontend"} - {ip = "10.0.0.2", role = "backend"} -] -``` - -其中 `#` 开头的表示行注释。`owner` 的 value 是一个字典,字典的面值通过 `{}` 方式包含的内容,字典内部的 key-value 和 `hello = "KCL"` 例子的写法类似。`database` 则是另一个字典,其中字典属性的 value 出现了布尔 `True`、列表 `[]` 和 `{}` 字典,其中列表和字典中还出现了数值类型的 value。 最后一个 `servers` 属性则是一个列表,列表内部嵌套着字典(字典和列表以及后续将要讲到的 `schema` 都可以相互嵌套)。 - -该配置输出的 YAML 结果如下: - -```yaml -$ kcl server.k -title: KCL Example -owner: - name: The KCL Authors - data: '2020-01-02T03:04:05' -database: - enabled: true - ports: - - 8000 - - 8001 - - 8002 - data: - - - delta - - phi - - - 3.14 - temp_targets: - cpu: 79.5 - case: 72.0 -servers: -- ip: 10.0.0.1 - role: frontend -- ip: 10.0.0.2 - role: backend -``` - -## 3. schema 定义配置的结构 - -KCL 通过 `schema` 语法结构为有着固定属性结构和默认值行为的属性提供抽象支持。 - -比如上面例子的中 `database` 的配置一般是用默认值即可。这样我们可以通过为数据库的默认配置定义一个结构: - -```python -schema DatabaseConfig: - enabled: bool = True - ports: [int] = [8000, 8001, 8002] - data: [[str|float]] = [["delta", "phi"], [3.14]] - temp_targets: {str: float} = {cpu = 79.5, case = 72.0} -``` - -`enabled` 是布尔类型;`ports` 为整数列表类型;`data` 为列表的列表,内层的列表元素是字符串或者浮点数类型;`temp_targets` 则是一个字典类型,字典的属性值是浮点数类型。并且 `DatabaseConfig` 的每个属性都定义了默认值。 - -然后通过 `database = DatabaseConfig {}` 就可以产生和默认值相同属性的结构。用户也可以修改默认值: - -```python -database = DatabaseConfig { - ports = [2020, 2021] -} -``` - -`schema DatabaseConfig` 不仅仅为属性提供了默认值,还为属性添加了类型信息。因此,如果用户不小心写错属性值类型的话,KCL 将会给出友好的错误提示,比如下面的例子将 `ports` 错误地写成了浮点数类型: - -```python -database = DatabaseConfig { - ports = [1.2, 1.3] -} -``` - -执行时将产生类似以下的错误(显示的文件路径和本地环境有关): - -```shell -$ kcl server.k -KCL Compile Error[E2G22] : The type got is inconsistent with the type expected - ---> File /path/to/server.k:8:2 - 8 | ports = [1.2, 1.3] - 5 ^ -> got [float(1.2)|float(1.3)] - ---> File /path/to/server.k:3:2 - 3 | ports: [int] = [8000, 8001, 8002] - 5 ~ -> expect [int] -expect [int], got [float(1.2)|float(1.3)] -``` - -类似地我们可以用以下的代码封装 `servers` 部分的属性: - -```python -schema ServerConfig: - ip: str - role: "frontend" | "backend" - -servers = [ - ServerConfig {ip = "10.0.0.1", role = "frontend"} - ServerConfig {ip = "10.0.0.2", role = "backend"} -] -``` - -其中 `ServerConfig` 的 `ip` 是字符串类型,并没有给出默认值。用户在生成 `ServerConfig` 类型的属性时必须手工添加 `ip` 属性的值,否则 KCL 将会报出缺少必填属性的错误。`role` 属性是 `"frontend" | "backend"` 枚举字符串类型。 - -此外,`schema` 还可以结合 `check`、`mixin`、可选属性、继承和扩展模块实现更为复杂的配置和策略数据的抽象,细节可以参考手册部分的文档。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/usecase.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/usecase.md deleted file mode 100644 index b6e18d0f..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/getting-started/usecase.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -sidebar_position: 2 ---- - -# Use Cases -This tutorial will demonstrate how to deliver an App with a Loadbalancer in one Kusion command. 
- -## Prerequisites - -- [Kusion](/docs/user_docs/getting-started/install) -- [Kubernetes](https://kubernetes.io/) or [Kind](https://kind.sigs.k8s.io/) - -## Init Project - -Firstly, let's clone the Konfig repo and enter the root directory: - -```shell -git clone git@github.com:KusionStack/konfig.git && cd konfig -``` - -After this step, we can init this tutorial project with online templates -```shell -kusion init --online -``` - -All init templates are listed as follows: - -```shell -➜ konfig git:(main) ✗ kusion init --online -? Please choose a template: [Use arrows to move, type to filter] -> code-city Code City metaphor for visualizing Go source code in 3D. - deployment-multi-stack A minimal kusion project of multi stacks - deployment-single-stack A minimal kusion project of single stack -``` - -Select `code-city` and press `Enter`. After that, we will see hints below and use the default value to config this project and stack. - -![](/img/docs/user_docs/getting-started/choose-template.gif) - - -After this process, we can get the whole file hierarchy with this command -```shell -cd code-city && tree -``` - -```shell -➜ konfig git:(main) ✗ cd code-city && tree -. -├── base -│   └── base.k -├── dev -│   ├── ci-test -│   │   └── settings.yaml -│   ├── kcl.yaml -│   ├── main.k -│   └── stack.yaml -└── project.yaml - -3 directories, 6 files -``` - More details about the directory structure can be found in -[Konfig](/docs/user_docs/concepts/konfig). - -### Review Config Files - -```python -# main.k -import base.pkg.kusion_models.kube.frontend - -# The application configuration in stack will overwrite -# the configuration with the same attribute in base. -appConfiguration: frontend.Server { - image = "howieyuen/gocity:latest" -} -``` -`main.k` only contains 4 lines. Line 1 imports a pkg that contains the model `Server` which is an abstract model representing the App we will deliver later. This model hides the complexity of Kubernetes `Deployment` and `Service` and only one field `image` is needed to make this App ready to use. - -More details about Konfig Models can be found in [Konfig](https://github.com/KusionStack/konfig) - -## Delivery -```shell -cd dev && kusion apply --watch -``` -Go to the `dev` folder and we will deliver this App into a Kubernetes cluster with one command `kusion apply --watch` - -![](/img/docs/user_docs/getting-started/apply.gif) - -Check `Deploy` status -```shell -kubectl -ncode-city get deploy -``` -The expected output is shown as follows: - -```shell -➜ dev git:(main) ✗ kubectl -ncode-city get deploy -NAME READY UP-TO-DATE AVAILABLE AGE -code-citydev 1/1 1 1 1m -``` - -Port-forward our App with the `service` -```shell -kubectl port-forward -ncode-city svc/gocity 4000:4000 -``` -```shell -➜ dev git:(main) ✗ kubectl port-forward -ncode-city svc/gocity 4000:4000 -Forwarding from 127.0.0.1:4000 -> 4000 -Forwarding from [::1]:4000 -> 4000 -``` - -Visit [http://localhost:4000/#/github.com/KusionStack/kusion](http://localhost:4000/#/github.com/KusionStack/kusion) in your browser and enjoy. 
- -![](/img/docs/user_docs/getting-started/gocity.png) diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/_category_.json deleted file mode 100644 index ffb4f82a..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "用户指南", - "position": 4 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/_category_.json deleted file mode 100644 index 4f4ae719..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "开始 Kusion", - "position": 1 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/from_kubernetes.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/from_kubernetes.md deleted file mode 100644 index 1aed13d0..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/from_kubernetes.md +++ /dev/null @@ -1,101 +0,0 @@ -# From Kubernetes - -## 1. Kubernetes OpenAPI Spec - -从 Kubernetes 1.4 开始,引入了对 OpenAPI 规范(在捐赠给 Open API Initiative 之前称为 swagger 2.0)的 alpha 支持,API 描述遵循 [OpenAPI 规范 2.0](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/2.0.md),从 Kubernetes 1.5 开始,Kubernetes 能够直接从[源码自动地提取模型并生成 OpenAPI 规范](https://github.com/kubernetes/kube-openapi),自动化地保证了规范和文档与操作/模型的更新完全同步。 - -此外,Kubernetes CRD 使用 [OpenAPI v3.0 validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation) 来描述(除内置属性 apiVersion、kind、metadata 之外的)自定义 schema,在 CR 的创建和更新阶段,APIServer 会使用这个 schema 对 CR 的内容进行校验。 - -## 2. KCL OpenAPI 支持 - -KCLOpenAPI 工具支持从 OpenAPI/CRD 定义提取并生成 KCL schema. 在[KCLOpenapi Spec](/reference/cli/openapi/spec.md)中明确定义了 OpenAPI 规范与 KCL 语言之间的映射关系。 - -[安装 Kusion 工具包](/docs/user_docs/getting-started/install)的同时会默认安装 KCLOpenapi 工具,KCLOpenapi 工具的使用和示例可参见[KCLOpenAPI 工具](/docs/reference/cli/openapi) - -## 3. 从 Kubernetes 模型迁移到 Kusion - -Kubernetes 内置模型的完整 OpenAPI 定义存放在 [Kubernetes openapi-spec 文件](https://github.com/kubernetes/kubernetes/blob/master/api/openapi-spec/swagger.json)。以该文件作为输入,KCLOpenapi 工具能够生成相应版本的全部模型 schema. 接下来以发布部署场景为例,演示从 Kubernetes 迁移到 Kusion 的流程。假设您的项目正在使用 [Kubernetes Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) 定义发布部署配置,迁移到 Kusion 只需要如下步骤: - -### 1. 使用已有的 kusion_models 模型包 - -在 Konfig 的 kusion_models 目录中已经保存了一份经过良好抽象的模型 —— Server 模型,点此查看 [Server Schema](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/server.k)。至于存量的 Kubernetes 配置数据,Kusion 计划提供 kube2kcl 转换工具,将其转换为 KCL 描述的基于上述 Server 模型的配置实例,这一工具正在开发中,近期即可开放使用。完成配置模型和数据迁移之后,接下来只需要按照 [Kubernetes - 使用 Kusion 进行应用部署运维](/docs/user_docs/guides/working-with-k8s/)中的指引定义并部署项目即可。 - -### 2. 创建自定义的 models 前端模型 - -当已有的 kusion_models 模型包不足以满足业务需求时,您还可以设计自定义的前端模型包。在 Konfig 的 kusion_kubernetes 目录中已经保存了一份生成好的 Kubernetes 1.22 版本模型,您可在此基础上直接编写自定义前端模型。并且您可仿照 kusion_models 的模式,开发自定义脚本,完成配置数据的迁移。此后项目部署参考 [Kubernetes - 使用 Kusion 进行应用部署运维](/docs/user_docs/guides/working-with-k8s/) 指引即可。 - -#### 1. 
Kubernetes Deployment 转为 KCL Schema - -在 Konfig 的 base/pkg/kusion_kubernetes 目录中,我们已经保存了一份生成的 [KCL 文件(Kubernetes 1.22 版本)](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/deployment.k),并生成了对应的 模型文档。您可跳过该步骤,使用已生成的模型包,或者你可自行生成特定版本。 - -从 [Kubernetes 1.23 版本的 openapi-spec 文件](https://github.com/kubernetes/kubernetes/blob/release-1.23/api/openapi-spec/swagger.json)中,可以找到 apps/v1.Deployment 模型相关的定义,截取片段如下: - -```json -{ - "definitions": { - "io.k8s.api.apps.v1.Deployment": { - "description": "Deployment enables declarative updates for Pods and ReplicaSets.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - }, - "spec": { - "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentSpec", - "description": "Specification of the desired behavior of the Deployment." - }, - "status": { - "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentStatus", - "description": "Most recently observed status of the Deployment." - } - }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "apps", - "kind": "Deployment", - "version": "v1" - } - ] - } - }, - "info": { - "title": "Kubernetes", - "version": "unversioned" - }, - "paths": {}, - "swagger": "2.0" -} -``` - -将以上述 spec 保存为 deployment.json,执行 ```kcl-openapi generate model -f deployment.json```,将在当前工作空间生成所有相关的 KCL schema 文件,如 [KCL Deployment 文件(Kubernetes 1.22 版本)](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_kubernetes/api/apps/v1/deployment.k) 所示。 - -#### 2. 编写自定义前端模型 - -由于 Kubernetes 内置模型较为原子化和复杂,我们推荐以 Kubernetes 原生模型作为后端输出的模型,对其进一步抽象,而向用户暴露一份更为友好和简单的前端模型界面,具体您可参照 Konfig 仓库中 [kusion_models Server](https://github.com/KusionStack/konfig/blob/main/base/pkg/kusion_models/kube/frontend/server.k) 模型的设计方式进行。 - -#### 3. 批量迁移配置数据 - -对于存量的 Kubernetes 配置数据,您可以仿照 kube2Kcl 工具的做法,编写自定义的转换脚本,进行一键迁移。Kusion 后续将提供该脚本的编写脚手架和编写指南。 - -## 4. 
从 Kubernetes CRD 迁移到 Kusion - -如果您的项目中使用了 CRD,也可以采用类似的模式,生成 CRD 对应的 KCL schema,并基于该 schema 声明 CR。 - -* 从 CRD 生成 KCL Schema - - ``` - kcl-openapi generate model --crd --skip-validation -f your_crd.yaml - ``` - -* 使用 KCL 声明 CR - - 使用 KCL 声明 CR 的模式与声明 Kubernetes 内置模型配置的模式相同,在此不做赘述。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/index.md deleted file mode 100644 index 1885db44..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/adopting/index.md +++ /dev/null @@ -1,7 +0,0 @@ -# 开始接入 Kusion - -对于全新的项目来说,您只需要从头开始采用 Kusion 技术栈编写和管理基础设施配置即可,我们提供了针对不同运行时的[用户指南](../../guides/guides.md)文档引导您这一过程。 -然而,对于已经建设了基础设施的项目,可能已有存量的配置模型和数据,对此,Kusion 也提供了一些自动化工具帮助您快速迁移: - -对于 kubernetes 用户,Kusion 提供了 OpenAPI 到 KCL 模型代码的转换工具,以直接复用 Kubernetes 已有的上百个核心模型。 -对于 istio 用户,以及 Kubernetes 内置模型无法支持的情况, Kusion 还支持将 CRD 自动生成为 KCL 模型代码。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/_category_.json deleted file mode 100644 index 17630407..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "ArgoCD", - "position": 7 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/argocd.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/argocd.md deleted file mode 100644 index 5d83e218..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/argocd.md +++ /dev/null @@ -1 +0,0 @@ -# ArgoCD diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/drift-detection-by-argocd.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/drift-detection-by-argocd.md deleted file mode 100644 index 1e749a4e..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/argocd/drift-detection-by-argocd.md +++ /dev/null @@ -1,136 +0,0 @@ -# 使用 ArgoCD 进行漂移检查 - -## 准备开始 - -安装 ArgoCD: - -```bash -kubectl create namespace argocd -kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml -``` - -## 配置 Kusion 插件 - -目前,ArgoCD 内置了一些常见的配置插件,包括 helm、jsonnet、kustomize。而对于 KCL 来说,作为一门全新的配置语言,想要使用 ArgoCD 实现漂移检查的能力,需要遵循它的插件化的机制,配置 Kusion 插件。具体操作如下: - -1. 下载 [patch](https://github.com/KusionStack/examples/blob/main/kusion/argo-cd/patch-argocd-cm.yaml) 文件 - -```shell -wget -q https://raw.githubusercontent.com/KusionStack/examples/main/kusion/argo-cd/patch-argocd-cm.yaml -``` - -2. 更新配置 - -```shell -kubectl -n argocd patch cm/argocd-cm -p "$(cat patch-argocd-cm.yaml)" -``` - -## 更新 ArgoCD 部署 - -完成第一步,ArgoCD 就可以识别 Kusion 插件,但 Kusion 插件还没有载入到 ArgoCD 的镜像中。要实现配置漂移检查,需要修改 argocd-repo-server 的 Deployment。 - -1. 下载 [patch](https://github.com/KusionStack/examples/blob/main/kusion/argo-cd/patch-argocd-repo-server.yaml) 文件 - -```shell -wget -q https://raw.githubusercontent.com/KusionStack/examples/main/kusion/argo-cd/patch-argocd-repo-server.yaml -``` - -2. 更新配置 - -```shell -kubectl -n argocd patch deploy/argocd-repo-server -p "$(cat patch-argocd-repo-server.yaml)" -``` - -3. 升级完成 - -```shell -kubectl get pod -n argocd -l app.kubernetes.io/name=argocd-repo-server -``` - -## 创建 KCL 项目 - -到此,准备工具已经完成,现在开始验证。这里我们使用开源 Konfig 大库中的示例项目。 - -1. 
开启本地端口转发 - -```shell -kubectl port-forward svc/argocd-server -n argocd 8080:443 -``` - -2. 登录 ArgoCD - -```shell -argocd login localhost:8080 -``` - -3. 创建 ArgoCD Application - -```shell -argocd app create guestbook-test \ ---repo https://github.com/KusionStack/konfig.git \ ---path appops/guestbook-frontend/prod \ ---dest-namespace default \ ---dest-server https://kubernetes.default.svc \ ---config-management-plugin kusion -``` - -:::info -注意:如果你正在使用私有仓库,需要先配置私有仓库的访问私钥凭证,再执行创建命令。详细操作,请参见 [Private Repositories](https://argo-cd.readthedocs.io/en/stable/user-guide/private-repositories/#ssh-private-key-credential)。 -::: - -创建成功后,可以看到以下输出: - -``` -application 'guestbook-test' created -``` - -通过ArgoCD UI,可以看到,已经创建的应用暂未同步,此处可以手动同步,也可以设置自动同步。 - -![](/img/docs/user_docs/guides/argocd/out-of-sync.jpg) - -4. 设置同步策略(仅同步 `unsynced` 的资源): - -```shell -argocd app set guestbook-test --sync-option ApplyOutOfSyncOnly=true -``` - -:::info -有关同步策略的详细信息,请参见 [Sync Options](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/) -::: - -同步成功: - -![](/img/docs/user_docs/guides/argocd/synced.jpg) - -## 配置漂移检查 - -到此,已经完成了 ArgoCD 监听 KCL 项目,实现配置漂移检查并实现结果一致性。我们来修改 `guestbook-test` 的镜像版本,实现配置变更。 - -1. 更新镜像 - -```diff - appConfiguration: frontend.Server { -- image = "gcr.io/google-samples/gb-frontend:v4" -+ image = "gcr.io/google-samples/gb-frontend:v5" - schedulingStrategy.resource = res_tpl.tiny - } -``` - -2. 更新编译结果 - -```shell -kusion compile -w appops/guestbook-frontend/prod -``` - -3. Git 提交并推送 - -```shell -git add . -git commit -m "mannual drifted config for appops/guestbook-frontend/prod" -git push origin main -``` - -4. 漂移配置自动收敛 - -![](/img/docs/user_docs/guides/argocd/reconcile-drifted-config.jpg) - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/guides.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/guides.md deleted file mode 100644 index b59fa5e3..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/guides.md +++ /dev/null @@ -1,3 +0,0 @@ -# 用户指南 - -用户指南主要面向用户各种真实的场景,通过具体直观的操作达成某些日常工作。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/_category_.json deleted file mode 100644 index 6c61d6bd..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "项目组织最佳实践", - "position": 5 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/mapping.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/mapping.md deleted file mode 100644 index 32c68164..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/mapping.md +++ /dev/null @@ -1,30 +0,0 @@ -# 映射关系 - -在[架构 & 概念](/user_docs/concepts/konfig.md)中已经介绍过 Project 和 Stack 的基本概念,配置大库中可以使用 Project 进行配置单元的隔离,一个配置大库由 Project 组成,Project 下进一步进行逻辑隔离便是 Stack,每个 Project 下包含一个或多个 Stack。Project 和 Stack 可以根据需要选择映射关系,比如用户可以把一个应用映射成一个 Project,也可以把一种运维场景映射为一个 Project,比如建站运维等,在本节中将详细介绍几种映射关系的最佳实践。 - -## 1. 
云原生:应用和集群 - -在应用为中心的运维体系中,应用是核心概念,也是 DevOps 主要运维的对象。在云原生场景中,应用通常部署在 Kubernetes 集群中,所以在这种典型场景中,可以将 Project 映射为应用,将 Stack 映射为集群。一个应用的配置根据集群进行隔离,集群间的差异化配置存放到每个 Stack 目录中,集群间的通用配置存放在 base 目录中。 - -| 概念 | 映射为 | -| ------- | ------ | -| Project | 应用 | -| Stack | 集群 | - -## 2. 单租户场景:应用和环境 - -在某些通用场景,我们可以对集群的概念进一步进行抽象为环境,一个应用通常需要部署到多个环境,比如测试、预发、生产。在单租户场景中,一个推荐的做法是将 Project 映射为应用,Stack 映射为环境。 - -| 概念 | 映射为 | -| ------- | ------ | -| Project | 应用 | -| Stack | 环境 | - -## 3 多租户场景:应用和环境 - -在多租户场景中且以应用为核心的运维体系中,一种推荐做法是在应用名中添加租户信息,应用名在不同租户间是唯一的,即 Project 映射为多租户间唯一应用,Project 的 Name 是多租户间唯一应用名,Stack 映射为该应用下的环境配置。 - -| 概念 | 映射为 | -| ------- | ---------------------- | -| Project | 多租户间唯一的应用名称 | -| Stack | 环境 | diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/organizing-projects-stacks.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/organizing-projects-stacks.md deleted file mode 100644 index 19bf9982..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/organizing-projects-stacks/organizing-projects-stacks.md +++ /dev/null @@ -1 +0,0 @@ -# 项目组织最佳实践 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/prometheus/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/prometheus/_category_.json deleted file mode 100644 index baac31dc..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/prometheus/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Prometheus", - "position": 8 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/prometheus/recording-and-alerting.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/prometheus/recording-and-alerting.md deleted file mode 100644 index 5b3b989d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/prometheus/recording-and-alerting.md +++ /dev/null @@ -1,470 +0,0 @@ ---- -sidebar_position: 1 ---- - -# 记录与告警 - -本指南将向你展示如何基于 Prometheus Operator 一键部署 Alertmanager 集群并集成 Prometheus,并使用 PrometheusRules 记录指标数据和推送告警。 - -## 介绍 - -Prometheus 是一个开源系统监控和警报工具包。它将其指标收集并存储为时间序列数据,即指标信息与记录它的时间戳一起存储,以及称为标签的可选键值对。 - -下图说明了 Prometheus 的架构及其一些生态系统组件: - -![](/img/docs/user_docs/guides/prometheus/structure.png) - -Prometheus 从检测作业中直接或通过中间推送网关从短期作业中抓取指标。它在本地存储所有抓取的样本,并对这些数据运行规则,以从现有数据聚合和记录新的时间序列或生成警报。Grafana 或其他 API 使用者可用于可视化收集的数据。 - -## 准备开始 - -在开始之前,我们需要做以下准备工作: - -1、安装 Kusion 工具链 - -我们推荐使用 kusion 的官方安装工具 `kusionup`,可实现 kusion 多版本管理等关键能力。详情信息请参阅[下载和安装](/docs/user_docs/getting-started/install)。 - -2、下载开源 Konfig 大库 - -在本篇指南中,需要用到部分已经抽象实现的 KCL 模型,有关 KCL 语言的介绍,可以参考 [Tour of KCL](/reference/lang/lang/tour.md)。 - -仓库地址: [https://github.com/KusionStack/konfig.git](https://github.com/KusionStack/konfig.git) - -3、可用的 Kubernetes 集群 - -必须要有一个 Kubernetes 集群,同时 Kubernetes 集群最好带有 [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) 命令行工具。 -如果你还没有集群,你可以通过 [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) 构建一个你自己的集群。 - -4、安装 Prometheus Operator - -Prometheus Operator 安装很简单,只需: - -```bash -kubectl create -f bundle.yaml -``` - -详见 Prometheus Operator [快速开始](https://github.com/prometheus-operator/prometheus-operator#quickstart)。 - -## 组件部署 - -在 konfig 大库的 `prometheus-install` 项目中,保存了设置 Prometheus 和 Alertmanager 的完整的配置: - -- Alertmanager 集群 -- AlertmanagerConfig -- Alertmanager 服务 -- Prometheus 集群 -- Prometheus 依赖的 RBAC -- Prometheus 服务 - -:::info 
-想要体验快速部署结果,可直接跳到[一键部署](#一键部署)小节。 -::: - -### 配置 Alertmanager - -Alertmanager 默认启动是按照最低配置,这并没什么用处,因为它在接收报警时不会发送任何通知。 - -你有 3 种方式来提供 [Alertmanager 配置](https://prometheus.io/docs/alerting/configuration/): - -1. 使用存储在 Kubernetes 密钥中的本机 Alertmanager 配置文件。 -2. 使用 `spec.alertmanagerConfiguration` 在定义主 Alertmanager 配置的同一命名空间中引用 AlertmanagerConfig 对象。 -3. 定义 `spec.alertmanagerConfigSelector` 和 `spec.alertmanagerConfigNamespaceSelector` 告诉 Operator 应该选择哪些 AlertmanagerConfigs 对象并将其与主 Alertmanager 配置合并。 - -:::tip -在 [`prometheus-install`](https://github.com/KusionStack/konfig/tree/main/base/examples/monitoring/prometheus-install) 项目中使用的第二种方式。 -::: - -1. 以下是 AlertmanagerConfig 配置,Alertmanager 将通知发送到虚构的 webhook 服务: - -```py -_alertmanager_config: monitoringv1alpha1.AlertmanagerConfig{ - metadata = { - name = "main" - namespace = _common_namespace - labels = { - "alertmanagerConfig" = "main" - } - } - spec = { - route = { - groupBy = ["job"] - groupWait = "30s" - groupInterval = "5m" - repeatInterval = "12h" - receiver = "webhook" - } - receivers = [ - { - name = "webhook" - webhookConfigs = [ - { - url = "http://example.com/" - } - ] - } - ] - } -} -``` - -2. 设置 3 个副本的 Alertmanager 集群,并引用 AlertmanagerConfig 对象: - -```py -_alertmanager: monitoringv1.Alertmanager{ - metadata = { - name = "main" - namespace = "default" - } - spec = { - replicas = 3 - # 使用 AlertmanagerConfig 作为全局配置 - alertmanagerConfiguration = { - name = _alertmanager_config.metadata.name - } - } -} -``` - -3. 公开 Alertmanager 服务,用于被后续的 Prometheus 实例集成。 -创建 Kubernetes Service,监听目标端口 `9093`: - -```py -_alertmanager_svc: corev1.Service{ - metadata = { - name = "alertmanager" - namespace = "default" - } - spec = { - selector = { - "alertmanager" = _alertmanager.metadata.name - } - ports = [ - { - name = "web" - port = 9093 - targetPort = "web" - } - { - name = "reloader-web" - port = 8080 - targetPort = "reloader-web" - } - ] - sessionAffinity = "ClientIP" - } -} -``` - -:::tip -详细配置,请查看源码文件: [`prometheus-install/prod/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-install/prod/main.k)。 -::: - -此 Alertmanager 集群现在功能齐全且高可用,但不会针对它触发任何报警。这是因为你还没有设置 Prometheus 应用。 - -### 配置 Prometheus - -在创建 Prometheus 之前,必须先为 Prometheus 服务帐户创建 RBAC 规则。 - -1. Prometheus ClusterRole 配置: - -```py -_prometheus_clusterrole: rbac.ClusterRole { - metadata = { - name = "prometheus" - namespace = "default" - } - rules = [ - { - apiGroups = [""] - resources = ["nodes", "nodes/metrics", "services", "endpoints", "pods"] - verbs = ["get", "list", "watch"] - } - { - apiGroups = [""] - resources = ["configmaps"] - verbs = ["get"] - } - { - apiGroups = ["networking.k8s.io"] - resources = ["ingresses"] - verbs = ["get", "list", "watch"] - } - { - nonResourceURLs = ["/metrics"] - verbs = ["get"] - } - ] -} -``` - -:::tip -RBAC 的完整配置,请查看源码文件:[`prometheus-install/base/base.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-install/base/base.k)。 -::: - -2. 
创建 Prometheus,它将向 Alertmanger 集群发送报警: - -```py -_prometheus: monitoringv1.Prometheus{ - metadata = { - name = "main" - namespace = "default" - } - spec = { - # 指定 ServiceAccount - serviceAccountName = "prometheus" - replicas = 2 - # ruleSelector 为空,表示不选择任何 PrometheusRule。 - ruleSelector = { - matchLabels = { - "role" = "alert-rules" - "prometheus" = "main" - } - } - serviceMonitorSelector = { - matchLabels = { - "prometheus" = "main" - } - } - # 通过 Alertmanager 的公开的 Service,配置 Alertmanager - alerting = { - alertmanagers = [ - { - name = _alertmanager_svc.metadata.name - namespace = _alertmanager_svc.metadata.namespace - port = _alertmanager_svc.spec.ports[0].name - } - ] - } - } -} -``` - -3. 最后,为了方便验证,顺便公开 Prometheus Admin API。 -创建 Kubernetes Service,监听目标端口 `9090`: - -```py -_prometheus_svc: corev1.Service{ - metadata = { - name = "prometheus" - namespace = "default" - } - spec = { - selector = { - "prometheus" = _prometheus.metadata.name - } - ports = [ - { - name = "web" - port = 9090 - targetPort = "web" - } - { - name = "reloader-web" - port = 8080 - targetPort = "reloader-web" - } - ] - sessionAffinity = "ClientIP" - } -} -``` - -Prometheus admin API 允许访问删除某个时间范围内的系列、清理墓碑、捕获快照等。 -有关 admin API 的更多信息可以在 [Prometheus 官方文档](https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis)中找到。 - -:::tip -详细配置,请查看源码文件: [`prometheus-install/prod/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-install/prod/main.k)。 -::: - -### 一键部署 - -目前已经完成所有监控报警相关配置,现在开始一键部署。首先进入 `prometheus-install` stack 目录: - -```bash -cd konfig/base/examples/monitoring/prometheus-install/prod -``` - -再执行 `kusion apply`: - -```bash -kusion apply -``` - -输出类似于: - -``` -✔︎ Compiling in stack prod... - -Stack: prod ID Action - * ├─ rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus Create - * ├─ monitoring.coreos.com/v1:Alertmanager:default:main Create - * ├─ monitoring.coreos.com/v1alpha1:AlertmanagerConfig:default:main Create - * ├─ monitoring.coreos.com/v1:Prometheus:default:main Create - * ├─ rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus Create - * ├─ v1:ServiceAccount:default:prometheus Create - * ├─ v1:Service:default:alertmanager Create - * └─ v1:Service:default:prometheus Create - -? Do you want to apply these diffs? 
[Use arrows to move, type to filter] - yes -> details - no -``` - -选择 `yes`,开始部署。部署完成后,执行下面的命令,将本地端口 `30900` 转发到 k8s Service 端口 `9090`: - -```bash -kubectl port-forward svc/prometheus-example 30900:9090 -``` - -现在,你可以打开 [http://127.0.0.1:30900](http://127.0.0.1:30900/),访问 Prometheus 界面,进入 “Status > Runtime & Build Information” 页面,检查 Prometheus 是否发现了 3 个 Alertmanager 示例: - -![](/img/docs/user_docs/guides/prometheus/alertmanager.jpg) - -## PrometheusRule - -自定义资源定义 (CRD) `PrometheusRule` 声明式定义 Prometheus 实例使用的所需 Prometheus 规则,包括记录规则和报警规则。这些规则由 Operator 协调并动态加载,无需重新启动 Prometheus。 - -### 记录规则 - -记录规则可以预先计算经常需要或计算量大的表达式,并将其结果保存为一组新的时间序列。查询预先计算的结果通常比每次需要时执行原始表达式要快得多。这对于仪表板特别有用,仪表板每次刷新时都需要重复查询相同的表达式。 - -下面的代码片段,是以节点信息为例的记录规则: - -```py -_sum_of_node_memory = """\ -sum( - node_memory_MemAvailable_bytes{job="node-exporter"} or - ( - node_memory_Buffers_bytes{job="node-exporter"} + - node_memory_Cached_bytes{job="node-exporter"} + - node_memory_MemFree_bytes{job="node-exporter"} + - node_memory_Slab_bytes{job="node-exporter"} - ) -) by (cluster) -""" - -_node_cpu = """\ -sum(rate(node_cpu_seconds_total{job="node-exporter",mode!="idle",mode!="iowait",mode!="steal"}[5m])) / -count(sum(node_cpu_seconds_total{job="node-exporter"}) by (cluster, instance, cpu)) -""" -``` - -`_sum_of_node_memory` 记录节点可用内存总量,以 byte 为单位。 - -`_node_cpu` 计算每 5 分钟节点 CPU 的平均增长率。 - -:::tip -详细配置, 请查看源码文件: [`prometheus-rules/record/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-rules/record/main.k). -::: - -现在,你可以创建上面的记录规则。 - -1、进入 `prometheus-rules` 项目的 `record` 目录: - -```bash -cd konfig/base/examples/monitoring/prometheus-rules/record -``` - -2、创建规则: - -```bash -kusion apply --yes -``` - -3、检查 Prometheus 已加载规则: - -```bash -kubectl port-forward svc/prometheus-example 30900:9090 -``` - -现在,你可以打开 [http://127.0.0.1:30900](http://127.0.0.1:30900/),访问 Prometheus 界面,进入 “Status > Rules” 页面,检查 Prometheus 是否已加载 `node.rules`: - -![](/img/docs/user_docs/guides/prometheus/node-rules.jpg) - -#### 拓展阅读 - -如果你想看到[记录规则](#记录规则)小节所生成的折线图,你需要在 `default` 命名空间部署 `node-exporter` 服务。 - -:::info -如何安装 node-exporter? 请查看这里: [`node-exporter.yaml`](https://github.com/KusionStack/examples/blob/main/prometheus/node-exporter.yaml) -::: - -那么,你将会看到,节点可用内存的折线图: - -![](/img/docs/user_docs/guides/prometheus/node-memory.jpg) - -和节点 CPU 每 5 分钟平均增长率的折线图: - -![](/img/docs/user_docs/guides/prometheus/node-cpu.jpg) - - -### 报警规则 - -报警规则可以根据 Prometheus 表达式语言表达式定义报警条件,并将有关触发报警的通知发送到外部服务。每当报警表达式在给定时间点产生一个或多个矢量元素时,对于这些元素的标签集,报警就会被视为已激活。 - -下面的代码片段是报警规则的示例: - -```py -_alerts: monitoringv1.PrometheusRule { - metadata = { - name = "example-alert" - namespace = "default" - labels: { - "prometheus": "example", - "role": "alert-rules", - } - } - spec = { - groups = [ - { - name = "alert.rules" - rules = [ - { - alert: "ExampleAlert" - # vector() 函数将标量作为没有标签的向量返回。 - expr: "vector(1)" - } - ] - } - ] - } -} -``` - -示例报警的表达式使用内部函数 `vertor()`,它将总是返回向量 1,即总是会触发报警。 - -:::tip -详细配置, 请查看源码文件: [`prometheus-rules/alert/main.k`](https://github.com/KusionStack/konfig/blob/main/base/examples/monitoring/prometheus-rules/alert/main.k). 
-::: - -现在,你可以创建报警规则: - -1、进入 `prometheus-rules` 项目的 `alert` 目录: - -```bash -cd konfig/base/examples/monitoring/prometheus-rules/alert -``` - -2、创建规则: - -```bash -kusion apply --yes -``` - -3、检查 Prometheus 已加载规则: - -由于你已经完成了端口转发的步骤,因此只需要刷新 “Status > Rules” 页面,检查 Prometheus 是否已加载 `alert.rules`: - -![](/img/docs/user_docs/guides/prometheus/alert-rules.jpg) - -4、检查 Alertmanager 成功接收报警: - -```bash -kubectl port-forward svc/alertmanager-example 30903:9093 -``` - -现在,你可以打开 [http://127.0.0.1:30903](http://127.0.0.1:30903/),访问 Alertmanager 界面,发现示例报警: - -![](/img/docs/user_docs/guides/prometheus/alert.jpg) - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/_category_.json deleted file mode 100644 index 5a4cbab9..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "敏感信息管理", - "position": 10 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/index.md deleted file mode 100644 index 3df2076e..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/index.md +++ /dev/null @@ -1 +0,0 @@ -# 敏感信息管理 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/vault-agent.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/vault-agent.md deleted file mode 100644 index d95a46ea..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/vault-agent.md +++ /dev/null @@ -1,302 +0,0 @@ -# Vault Agent - -本指南将向你展示,KCL/Kusion 通过集成 Vault,解决敏感信息的传输问题。 -本次演示是将数据库的用户名和密码传输到 Pod 中,涉及 3 个 Kubernetes 资源: - -- 命名空间(Namespace) -- 无状态应用(Deployment) -- 服务账号(ServiceAccount) - -:::tip -本指南要求你对 Kubernetes 有基本的了解。不清楚相关概念的,可以前往 Kubernetes 官方网站,查看相关说明: -- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) -- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) -- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) -- [ServiceAccount](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -::: - -## 1. 准备开始 - -在开始之前,我们需要做以下准备工作: - -1、安装 Kusion 工具链 - -我们推荐使用 kusion 的官方安装工具 `kusionup`,可实现 kusion 多版本管理等关键能力。 -详情信息请参阅[下载和安装](/docs/user_docs/getting-started/install)。 - -2、下载开源 Konfig 大库 - -在本篇指南中,需要用到部分已经抽象实现的 KCL 模型。 -有关 KCL 语言的介绍,可以参考 [Tour of KCL](/reference/lang/lang/tour.md)。 - -仓库地址: https://github.com/KusionStack/konfig.git - -3、可用的 Kubernetes 集群 - -必须要有一个 Kubernetes 集群,同时 Kubernetes 集群最好带有 -[kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) 命令行工具。 -如果你还没有集群,你可以通过 [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) -构建一个你自己的集群。 - -4、可用的 Helm CLI - -Helm 工具用来部署 Vault Server 和 Agent Injector。 -如果你还没有安装 Helm,请参阅 [Helm 官方地址](https://helm.sh/docs/intro/install/)。 - -## 2. 
安装 Vault - -推荐使用 Helm Chart 在 Kubernetes 上部署 Vault Server 和 Agent。 -[Helm](https://helm.sh/docs/helm/) 是一个包管理器, -它可以安装和配置 Vault 及其相关组件,以不同模式运行。 -Helm Chart 实现了模板的条件化和参数化。这些参数可以通过命令行参数设置或在 YAML 中定义。 - -1、添加 HashiCorp Helm 存储库: -```bash -helm repo add hashicorp https://helm.releases.hashicorp.com -``` - -2、更新所有存储库以确保 helm 缓存了最新版本: -```bash -helm repo update -``` - -3、安装最新版本的 Vault Server 和 Agent,并以开发模式运行: -```bash -helm install vault hashicorp/vault --set "server.dev.enabled=true" -``` -`server.dev.enabled=true` 表示 Vault 在单 Pod 上以开发者模式启动。 - -4、检查 Default 命名空间中的所有 Pod: -```bash -kubectl get pod -``` - -输出类似于: -``` -NAME READY STATUS RESTARTS AGE -vault-0 1/1 Running 0 2d1h -vault-agent-injector-58b6d499-k9x9r 1/1 Running 0 2d1h -``` - -`vault-0` 是以 **dev** 模式运行的 Vault 服务器, -`vault-agent-injector-58b6d499-k9x9r` 是 Agent,会根据 Annotation 执行数据注入。 - -:::caution -本例为了简化演示,使用 **dev** 模式启动 Vault 服务器, -此模式下,Vault 会自动初始化并解封(Unseal)。请勿在生产环境中使用。 -::: - -## 3. 配置 Vault - -Vault 将机密数据保存在自己的数据库中,用户需要先配置相关机密数据,并启用 Vault 的 Kubernetes 认证。 - -### 3.1 配置机密数据 {#set-secret-data} - -在[创建带注解的 Pod](#create-pod-with-annotation) 小节,将会把数据库的用户名和密码作为机密数据注入 Pod, -而 Vault 将此机密数据保存。要创建此类数据,需要 Vault 启用 kv 引擎,并将用户名和密码保存在指定的路径中。 - -1、在 `vault-0` 启动交互式 shell 终端: -```bash -kubectl exec -it vault-0 -- /bin/sh -``` - -2、指定路径 `path=internal` 启动 kv 引擎: -```bash -vault secrets enable -path=internal kv-v2 -``` -输出类似于: -```bash -Success! Enabled the kv-v2 secrets engine at: internal/ -``` - -:::tip -有关 kv secrets 引擎的更多信息,请参阅 -[Static Secrets: Key/Value Secret](https://learn.hashicorp.com/tutorials/vault/static-secrets)。 -::: - -3、在 `internal/database/config` 路径创建 secret,包含 `username` 和 `password`: -```bash -vault kv put internal/database/config username="db-readonly-username" password="db-secret-password" -``` -输出类似于: -``` -Key Value ---- ----- -created_time 2022-03-13T08:40:02.1133715Z -deletion_time n/a -destroyed false -version 1 -``` - -4、检查创建结果: -```bash -vault kv get internal/database/config -``` -输出类似于: -``` -======= Metadata ======= -Key Value ---- ----- -created_time 2022-03-13T08:40:02.1133715Z -custom_metadata -deletion_time n/a -destroyed false -version 1 - -====== Data ====== -Key Value ---- ----- -password db-secret-password -username db-readonly-username -``` -到此,机密数据创建完毕,暂且不需要退出 Pod。 - -### 3.2 启用 kubernetes 身份认证 - -Vault 提供了 Kubernetes 身份验证方法,使客户端能够使用 Kubernetes ServiceAccount 令牌进行身份验证。 -此令牌在创建时提供给每个 Pod。 - -1、继续上一小节的 Terminal,启用 Kubernetes 身份验证: -```bash -vault auth enable kubernetes -``` -输出类似于: -``` -Success! Enabled kubernetes auth method at: kubernetes/ -``` - -2、配置 kubernetes 身份认证规则, -依赖 Kubernetes API 地址、ServiceAccount 令牌、证书以及 Kubernetes ServiceAccount 的颁发者(Kubernetes 1.21+ 需要): -```bash -vault write auth/kubernetes/config \ - kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443" \ - token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ - kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ - issuer="https://kubernetes.default.svc.cluster.local" -``` - -输出类似于: -``` -Success! 
Data written to: auth/kubernetes/config -``` -Kubernetes 创建容器时,将 `token_reviewer_jwt` 和 `kubernetes_ca_cert` 挂载到容器中。 -环境变量 `KUBERNETES_PORT_443_TCP_ADDR` 引用的是 Kubernetes 主机的内部网络地址。 - -3、设置读权限的 _policy_ - -后面要部署的服务,需要读取路径 `internal/database/config` 中保存的机密数据,先给该路径添加读权限: -```bash -vault policy write kcl-vault-agent-agent-policy - < deletion_time: destroyed:false version:1] -``` - -可以看到未格式化的数据库用户名和密码,这也是[配置机密数据](#set-secret-data)小节配置的内容。 - -#### 4.2.2 格式化输出 - -没有格式化的数据显然是不合理的,给业务应用在读取配置方面也添加了不必要的障碍。 -数据格式化,Vault 也提供了一些[模板说明](https://www.vaultproject.io/docs/agent/template)。 -在本例子中,只需要打开 `main.k` 中被注释的部分,再次下发配置即可。 - -以下展示的是 `main.k` 中的部分配置代码: -```py -podMetadata = apis.ObjectMeta { - annotations = { - "vault.hashicorp.com/agent-inject" = "true" - "vault.hashicorp.com/role" = "kcl-vault-agent-role" - "vault.hashicorp.com/agent-inject-secret-database-config.txt" = "internal/data/database/config" - "vault.hashicorp.com/agent-inject-status" = "update" - "vault.hashicorp.com/agent-inject-template-database-config.txt" = """\ -{{- with secret "internal/data/database/config" -}} -postgresql://{{ .Data.data.username }}:{{ .Data.data.password }}@postgres:5432/wizard -{{- end -}}""" -``` - -重新下发配置: -```bash -kusion apply --yes=true -``` - -待 Deployment 滚动更新完成后,检查机密数据注入结果: -```bash -kubectl exec -n kcl-vault-agent \ - $(kubectl get pod -n kcl-vault-agent -l app=kcl-vault-agent-test -o jsonpath="{.items[0].metadata.name}") \ - --container kcl-vault-agent-test -- cat /vault/secrets/database-config.txt -``` - -输出类似于: -``` -postgresql://db-readonly-username:db-secret-password@postgres:5432/wizard -``` -可以看到,不仅成功注入了机密数据,而且按照 Pod 模板中的 Annotation 字段指定的格式渲染结果。 - -到此我们就完成了 KCL/Kusion 集成 Vault Agent Injector 实现了敏感信息的传输。 \ No newline at end of file diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/vault-csi-provider.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/vault-csi-provider.md deleted file mode 100644 index 5f60b11d..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/sensitive-data-solution/vault-csi-provider.md +++ /dev/null @@ -1,289 +0,0 @@ -# Vault CSI Provider - -本指南将向你展示,KCL/Kusion 通过集成 Vault CSI Provider,解决敏感信息的传输问题。 -本次演示是将数据库的用户名和密码传输到 Pod 中,涉及 3 个 Kubernetes 内置资源和 1 个 自定义资源: - -- 命名空间(Namespace) -- 无状态应用(Deployment) -- 服务账号(ServiceAccount) -- 自定义资源(SecretProviderClass) - -:::tip -本指南要求你对 Kubernetes 有基本的了解。不清楚相关概念的,可以前往 Kubernetes 官方网站,查看相关说明: -- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) -- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) -- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) -- [ServiceAccount](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) -- [SecretProviderClass](https://secrets-store-csi-driver.sigs.k8s.io/concepts.html#custom-resource-definitions-crds) -::: - -## 1. 
准备开始 - -在开始之前,我们需要做以下准备工作: - -1、安装 Kusion 工具链 - -我们推荐使用 kusion 的官方安装工具 `kusionup`,可实现 kusion 多版本管理等关键能力。 -详情信息请参阅[下载和安装](/docs/user_docs/getting-started/install)。 - -2、下载开源 Konfig 大库 - -在本篇指南中,需要用到部分已经抽象实现的 KCL 模型。 -有关 KCL 语言的介绍,可以参考 [Tour of KCL](/reference/lang/lang/tour.md)。 - -仓库地址: https://github.com/KusionStack/konfig.git - -3、可用的 Kubernetes 集群 - -必须要有一个 Kubernetes 集群,同时 Kubernetes 集群最好带有 -[kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) 命令行工具。 -如果你还没有集群,你可以通过 [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) -构建一个你自己的集群。 - -4、可用的 Helm CLI - -Helm 工具用来部署 Vault Server 和 CSI Driver。 -如果你还没有安装 Helm,请参阅 [Helm 官方地址](https://helm.sh/docs/intro/install/)。 - -## 2. 安装 Vault 和 CSI Driver - -推荐使用 Helm Chart 在 Kubernetes 上部署 Vault Server 和 CSI Driver -[Helm](https://helm.sh/docs/helm/) 是一个包管理器, -它可以安装和配置 Vault 及其相关组件,以不同模式运行。 -Helm Chart 实现了模板的条件化和参数化。这些参数可以通过命令行参数设置或在 YAML 中定义。 - -### 2.1 安装 Vault - -1、添加 HashiCorp Helm 存储库: -```bash -helm repo add hashicorp https://helm.releases.hashicorp.com -``` - -2、更新所有存储库以确保 helm 缓存了最新版本: -```bash -helm repo update -``` - -3、安装最新版本的 Vault Server,以开发模式运行,禁用 Injector 服务并启用 CSI: -```bash -helm install vault hashicorp/vault \ - --set "server.dev.enabled=true" \ - --set "injector.enabled=false" \ - --set "csi.enabled=true" -``` -`server.dev.enabled=true` 表示 Vault 在单 Pod 上以开发者模式启动; -`injector.enabled=false` 表示禁用 Injector 服务; -`csi.enabled=true` 表示启用 Vault CSI Pod。 -如果你已经安装了 Vault,可以使用 `helm upgrade` 命令来更新 Vault 的部署模式。 - -4、检查 Default 命名空间中的所有 Pod: -```bash -kubectl get pod -NAME READY STATUS RESTARTS AGE -vault-0 1/1 Running 0 17m -vault-csi-provider-456hl 1/1 Running 0 17m -``` -等到 `vault-0` 的状态是 `Running` 并且准备就绪(`1/1`),再继续本指南。 - -### 2.2 安装 CSI Driver - -[Secrets Store CSI 驱动程序](https://secrets-store-csi-driver.sigs.k8s.io/introduction.html) -`secrets-store.csi.k8s.io` 允许 Kubernetes 将存储在外部机密存储中的多个机密、密钥和证书作为卷挂载到其 Pod 中。 -附加卷后,其中的数据将被挂载到容器的文件系统中。 - -:::tip -[容器存储接口(CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md) -是一种标准,用于将任意块和文件存储系统暴露给 Kubernetes 等容器编排系统 (CO) 上的容器化工作负载。 -使用 CSI 第三方存储提供商可以编写和部署插件,在 Kubernetes 中公开新的存储系统,而无需接触核心 Kubernetes 代码。 -::: - -1、添加 CSI 驱动的 Helm 存储库: -```bash -helm repo add secrets-store-csi-driver https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts -``` - -2、安装最新版本的 Kubernetes-Secrets-Store-CSI-Driver: -```bash -helm install csi secrets-store-csi-driver/secrets-store-csi-driver --namespace kube-system -``` -`csi-secrets-store-csi-driver` 是以 DemonSet 形式部署在 `kube-system` 命名空间。 - -3、检查 CSI Driver 的 Pod 是否启动: -```bash -kubectl --namespace=kube-system get pods -l "app=secrets-store-csi-driver" -NAME READY STATUS RESTARTS AGE -csi-secrets-store-csi-driver-2wl2f 3/3 Running 0 2m -``` -等待 `csi-secrets-store-csi-driver-2wl2f` 的状态是 `Running`,并且已经准备就绪(`3/3`),再继续本指南。 - -## 3. 
配置 Vault - -Vault 将机密数据保存在自己的数据库中,用户需要先配置相关机密数据,并启用 Vault 的 Kubernetes 认证。 - -### 3.1 配置机密数据 {#set-secret-data} - -在[创建挂载 Vault Secret 的 Pod](#create-pod-with-secret-mounted)小节,挂载到 Pod 中的卷, -引用了保存在 `secret/data/db-pass` 路径下的 secret 。 -Vault 以开发模式运行时,kv 引擎会启用默认路径 `/secret`。 - -1、在 `vault-0` 启动交互式 shell 终端: -```bash -kubectl exec -it vault-0 -- /bin/sh -``` - -2、在 `secret/db-pass` 路径创建带有密码的 secret: -```bash -vault kv put secret/db-pass password="db-secret-password" -``` - -输出类似于: -``` -Key Value ---- ----- -created_time 2022-03-17T07:45:06.3767973Z -custom_metadata -deletion_time n/a -destroyed false -version 1 -``` - -3、验证 secret 在路径 `/secret/db-pass` 上是否可读: -```bash -vault kv get secret/db-pass -``` - -输出类似于: -``` -======= Metadata ======= -Key Value ---- ----- -created_time 2022-03-17T07:45:06.3767973Z -custom_metadata -deletion_time n/a -destroyed false -version 1 - -====== Data ====== -Key Value ---- ----- -password db-secret-password -``` -到此,机密数据创建完毕,暂且不需要退出 Pod。 - -### 3.2 启用 kubernetes 身份认证 - -Vault 提供了 Kubernetes 身份验证方法,使客户端能够使用 Kubernetes ServiceAccount 令牌进行身份验证。 -此令牌在创建时提供给每个 Pod。 - -1、继续上一小节的 Terminal,启用 Kubernetes 身份验证: -```bash -vault auth enable kubernetes -``` -输出类似于: -``` -Success! Enabled kubernetes auth method at: kubernetes/ -``` - -2、配置 kubernetes 身份认证规则,依赖 Kubernetes API 地址、ServiceAccount 令牌、 -证书以及 Kubernetes ServiceAccount 的颁发者(Kubernetes 1.21+ 需要): -```bash -vault write auth/kubernetes/config \ - kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443" \ - token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ - kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ - issuer="https://kubernetes.default.svc.cluster.local" -``` - -输出类似于: -``` -Success! Data written to: auth/kubernetes/config -``` -Kubernetes 创建容器时,将 `token_reviewer_jwt` 和 `kubernetes_ca_cert` 挂载到容器中。 -环境变量 `KUBERNETES_PORT_443_TCP_ADDR` 引用的是 Kubernetes 主机的内部网络地址。 - -3、设置读权限的 _policy_ - -Kubernetes-Secrets-Store-CSI-Driver 需要读取密钥,保证它对挂载的卷和卷中密钥有读权限。 - -创建名为 `kcl-vault-csi-policy` 的 _policy_: -```bash -vault policy write kcl-vault-csi-policy - < 不清楚相关概念的,可以前往 Kubernetes 官方网站,查看相关说明: - -- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) -- [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) -- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) -- [Service](https://kubernetes.io/docs/concepts/services-networking/service/) - -## 1. 准备工作 - -在开始之前,我们需要做以下准备工作: - -1、安装 Kusion 工具链 - -我们推荐使用 kusion 的官方安装工具 `kusionup`,可实现 kusion 多版本管理等关键能力。详情信息请参阅[下载和安装](/docs/user_docs/getting-started/install)。 - -2、下载开源 Konfig 大库 - -在本篇指南中,需要用到部分已经抽象实现的 KCL 模型,有关 KCL 语言的介绍,可以参考 [Tour of KCL](/reference/lang/lang/tour.md)。 - -仓库地址: [https://github.com/KusionStack/konfig.git](https://github.com/KusionStack/konfig.git) - -3、可用的 Kubernetes 集群 - -必须要有一个 Kubernetes 集群,同时 Kubernetes 集群最好带有 [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) 命令行工具。 -如果你还没有集群,你可以通过 [Minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/) 构建一个你自己的集群。 - -## 2. 初始化 - -本指南是以 KCL 和 Kusion 的方式部署应用服务,依赖 kusion 工具、Konfig 大库和 Kubernetes 集群。 - -打开 Konfig 大库项目,进入 `appops` 目录,初始化 KCL 项目: - -```bash -cd appops && kusion init -``` - -`kusion init` 命令会提示你输入可能需要的参数,例如项目名称、项目描述,镜像地址等;也可以一路点击 *回车* 使用默认值。输出类似于: - -``` -✔ deployment-single-stack A minimal kusion project of single stack -This command will walk you through creating a new kusion project. 
- -Enter a value or leave blank to accept the (default), and press . -Press ^C at any time to quit. - -✔ project name: deployment-single-stack -✔ project description: A minimal kusion project of single stack -✔ Stack: dev -✔ ClusterName: kubernetes-dev -✔ Image: gcr.io/google-samples/gb-frontend:v4 -Created project 'deployment-single-stack' -``` - -到此,我们就成功初始化一个 KCL 项目:deployment-single-stack,该代码包含一个 Project 和一个 Stack。 -其中,`project name` 和 `project description` 是每个模板都需要设置的属性,目的是为了模板共享。 -剩余三个字段,是模板中需要用户填入的三个属性,`Stack` 表示配置栈的名称,可以理解为配置的隔离标识; -`ClusterName` 是指集群名称,在本例中暂未使用;`Image` 表示应用的业务容器的镜像地址。 - -> 有关 Project 和 Stack 的设计说明,请参阅 [Project&Stack](/user_docs/concepts/konfig.md)。 - -该项目的目录结构如下: - -``` -deployment-single-stack -├── README.md -├── base -│ └── base.k -├── dev -│ ├── ci-test -│ │ └── settings.yaml -│ ├── kcl.yaml -│ ├── main.k -│ └── stack.yaml -├── kusion.yaml -└── project.yaml - -3 directories, 8 files -``` - -可以看到,目录共分成三层,每层目录都有各自的设计意义。 -根目录下 `project.yaml` 表示项目级别的属性;`kusion.yaml` 是模板的配置文件,与本指南的操作内容无关。 -`base` 目录存放的是公共配置;`dev` 目录存放的是定制化配置,`kcl.yaml` 是静态编译配置,指定了编译文件, -`main.k` 是定制化配置的具体代码,`stack.yaml` 存放的是是配置栈的描述信息; -`dev/ci-test` 目录存放的是动态编译配置和最终输出,默认情况下,编译输出到该目录下的 `stdout.golden.yaml` 文件。 -整体来说,`.k` 文件是 KCL 源码,`.yaml` 是配置文件。 - -## 3. 配置编译 - -到此,已经借助 kusion 提供的内置模板,完成了项目的开发。 -项目的编程语言是 KCL,不是 Kubernetes 认识的 JSON/YAML,因此还需要编译得到最终输出。 - -首先进入到项目的 Stack 目录(`deployment-single-stack/dev`)并执行编译: - -```bash -cd deployment-single-stack/dev && kusion compile -``` - -输出默认保存在 `deployment-single-stack/dev/ci-test/stdout.golden.yaml` 文件中。 - -> 有关 kusion 命令行工具的说明,执行 `kusion -h`,或者参考工具的在线文档 [Overview of Kusion CLI](/docs/reference/cli/kusionctl/overview)。 - -## 4. 配置生效 - -完成编译,现在开始下发配置。通过查看 `stdout.golden.yaml` 文件,可以看到 3 个资源: - -- 一个 name 为 deployment-single-stackdev 的 Deployment -- 一个 name 为 deployment-single-stack 的 Namespace -- 一个 name 为 frontend-service 的 Service - -该文件的内容已经是 Kubernetes 能够识别的配置,可以使用 `kubectl apply -f stdout.golden.yaml` 直接下发配置, -也可以使用 `kusion apply` 完成配置编译并下发(该命令包含了配置编译)。 - -> 推荐使用 kusion 工具,本例中的编译输出是完整的 YAML 声明,但不是所有的 KCL 项目编译结果都是如此。 - -执行命令: - -```bash -kusion apply -``` - -输出类似于: - -``` -SUCCESS Compiling in stack dev... - -Stack: dev Provider Type Name Plan - * ├─ kubernetes v1:Namespace deployment-single-stack[0] Create - * ├─ kubernetes apps/v1:Deployment deployment-single-stackdev[0] Create - * └─ kubernetes v1:Service frontend-service[0] Create - -✔ yes -Start applying diffs...... - SUCCESS Creating Namespace/deployment-single-stack - SUCCESS Creating Deployment/deployment-single-stackdev - SUCCESS Creating Service/frontend-service -Creating Service/frontend-service [3/3] ███████████████████████████████████████████ 100% | 0s - -Apply complete! Resources: 3 created, 0 updated, 0 deleted. 
-``` - -以上就完成了配置生效,可以使用 `kubectl` 工具检查资源的实际状态。 - -1、 检查 Namespace - -```bash -kubectl get ns -``` - -输出类似于: - -``` -NAME STATUS AGE -argocd Active 59d -default Active 72d -deployment-single-stack Active 10m -``` - -2、检查 Deployment - -```bash -kubectl get deploy -n deployment-single-stack -``` - -输出类似于: - -``` -NAME READY UP-TO-DATE AVAILABLE AGE -deployment-single-stackdev 1/1 1 1 11m -``` - -3、检查 Service - -```bash -kubectl get svc -n deployment-single-stack -``` - -输出类似于: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -frontend-service NodePort 10.0.0.0 80:10001/TCP 11m -``` - -4、检查应用 - -使用 `kubecl` 工具,将本机端口 `30000` 映射到 Service 端口 `80` - -```bash -kubectl port-forward svc/frontend-service -n deployment-single-stack-xx 30000:80 -``` - -打开浏览器访问 [http://127.0.0.1:30000](http://127.0.0.1:30000): -![](/img/docs/user_docs/guides/working-with-k8s/app-preview.jpg) diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/2-container.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/2-container.md deleted file mode 100644 index e10e0f1f..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/2-container.md +++ /dev/null @@ -1,30 +0,0 @@ -# 配置业务容器 - -Server 模型中的 mainContainer 属性用于声明应用的业务容器配置,有关业务容器的抽象定义,可以查看 KCL Model 中 [base.pkg.kusion_models.kube.frontend.container](/docs/reference/model/kusion_models/kube/frontend/container/doc_container) 模块的文档。 - -## 1. 准备工作 - -可参考:[部署应用服务/准备工作](./1-deploy-server.md#1-%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C) - -## 2. 业务容器配置样例 - -```py -appConfiguration: frontend.Server { - # 业务容器配置 - mainContainer = container.Main { - # 业务容器名称 - name = "main" - # 环境变量 - env = [ - { - name = "HOST_NAME" - value = "example.com" - } - ] - # 端口号配置 - ports = [ - { containerPort = 80 } - ] - } -} -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/3-monitoring.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/3-monitoring.md deleted file mode 100644 index 4aa55c15..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/3-monitoring.md +++ /dev/null @@ -1,77 +0,0 @@ -# 配置监控 - -本篇指南向你展示,如何使用 KCL 语言与其相对应的 CLI 工具 Kusion,完成一个 Kubernetes 应用 Prometheus 监控部署。本次演示的样例主要由以下组件构成: - -- 命名空间(Namespace) -- 无状态应用(Deployment) -- Pod 监控(PodMonitor) - -> 本指南要求你对 Kubernetes 和 Prometheus 有基本的了解。不清楚相关概念的,可以前往 Kubernetes 和 Prometheus 官方网站,查看相关说明: - -- [Learn Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) -- [Prometheus Introduction](https://prometheus.io/docs/introduction/overview/) - -## 1. 准备开始 - -在开始之前,除了参考 [部署应用服务/准备工作](./1-deploy-server.md#1-%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C) 的准备工作,还需要完成如下准备: - -- 在 Kubernetes 集中部署 Prometheus Operator - -根据 [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) 中的提示步骤在您的集群当中部署 Prometheus Operator - -## 2. 监控配置样例 - -通过将 `enableMonitoring` 设置为 `True` 使能配置,并添加业务容器端口号配置 `8080` - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.container -import base.pkg.kusion_models.kube.frontend.container.env as e -import base.pkg.kusion_models.kube.frontend.container.port as cp -import base.pkg.kusion_models.kube.frontend.container.probe as p - -# The application configuration in stack will overwrite -# the configuration with the same attribute in base. 
-appConfiguration: frontend.Server { - # Main container configuration - mainContainer: container.Main { - name = "prometheus-example-app" - ports = [ - cp.ContainerPort { - name = "web" - containerPort = 8080 - } - ] - } - enableMonitoring = True -} -``` - -## 3. 配置生效 - -执行命令: - -```bash -kusion apply -``` - -输出类似于: - -``` - SUCCESS Compiling in stack prod... - -Stack: prod Provider Type Name Plan - * ├─ kubernetes v1:Namespace prometheus-example-app[0] Create - * ├─ kubernetes monitoring.coreos.com/v1:PodMonitor prometheus-example-appprod[0] Create - * └─ kubernetes apps/v1:Deployment prometheus-example-appprod[0] Create -``` - -## 4. 查看监控面板 - -可以看到,除了部署 kubernetes `Deployment` 和 `Namespace` 资源外,还额外部署了 `PodMonitor` 资源用于配置 Prometheus 监听应用 Pod,当资源都创建完成时,可以通过如下命令查看 Prometheus 监控面板。 - -``` -kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090 -``` - -最后通过 http://localhost:9090 访问监控面板并查看应用程序的监控指标。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/4-network.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/4-network.md deleted file mode 100644 index 316c90b0..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/4-network.md +++ /dev/null @@ -1,51 +0,0 @@ -# 配置网络 - -Server 模型中的 services 属性用于声明应用的网络配置,有关网络的抽象定义,可以查看 KCL Model 中 [base.pkg.kusion_models.kube.frontend.service](/docs/reference/model/kusion_models/kube/frontend/service/doc_service) 模块的文档。 - -## 1. 准备工作 - -可参考:[部署应用服务/准备工作](./1-deploy-server.md#1-%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C) - -## 2. 网络配置样例 - -在样例代码的 dev/main.k 或者 base/base.k 中添加 Service 配置: - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.service - -appConfiguration: frontend.Server { - # 添加 Service 配置 - services = [ - service.Service { - name = "app" - type = "NodePort" - ports = [ - { - "port" = 80 - } - ] - } - ] -} -``` - -上述代码是样例配置,可以根据 [Service](/docs/reference/model/kusion_models/kube/frontend/service/doc_service) 模型定义和实际情况添加自定义配置。 - -## 3. 配置生效 - -再次执行【[配置生效](./1-deploy-server.md#4-%E9%85%8D%E7%BD%AE%E7%94%9F%E6%95%88)】的步骤即可部署新的 Service 配置: - -``` -$ kusion apply -SUCCESS Compiling in stack dev... - -Stack: dev Provider Type Name Plan - * ├─ kubernetes v1:Namespace demo UnChange - * ├─ kubernetes v1:Service demo-service Update - * └─ kubernetes apps/v1:Deployment demodev UnChange - -✔ yes -SUCCESS Updating v1:Service -Updating v1:Service [1/1] ████████████████████████████████ 100% | 0s -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/5-image-upgrade.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/5-image-upgrade.md deleted file mode 100644 index a1e834d9..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/5-image-upgrade.md +++ /dev/null @@ -1,40 +0,0 @@ -# 镜像升级 - -Server 模型中的 image 属性用于声明应用的业务容器镜像,有关镜像的定义,可以查看 KCL Model 中 [base.pkg.kusion_models.kube.frontend.server](/docs/reference/model/kusion_models/kube/frontend/doc_server) 模块的文档。 - -## 1. 准备工作 - -可参考:[部署应用服务/准备工作](./1-deploy-server.md#1-%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C) - -## 2. 镜像升级 - -编辑 dev/main.k 中的 image 的值: - -```py -import base.pkg.kusion_models.kube.frontend - -appConfiguration: frontend.Server { - # 修改 image 的值为要升级的版本 - # 修改前:image = "gcr.io/google-samples/gb-frontend:v4" - # 修改后: - image = "gcr.io/google-samples/gb-frontend:v5" -} -``` - -## 3. 
配置生效 - -再次执行【[配置生效](./1-deploy-server.md#4-%E9%85%8D%E7%BD%AE%E7%94%9F%E6%95%88)】的步骤即可升级应用的镜像: - -``` -$ kusion apply -SUCCESS Compiling in stack dev... - -Stack: dev Provider Type Name Plan - * ├─ kubernetes v1:Namespace demo UnChange - * ├─ kubernetes v1:Service demo-service UnChange - * └─ kubernetes apps/v1:Deployment demodev Update - -✔ yes -SUCCESS Updating apps/v1:Deployment -Updating apps/v1:Deployment [1/1] ████████████████████████████████ 100% | 0s -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/6-resource.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/6-resource.md deleted file mode 100644 index 4c851a57..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/6-resource.md +++ /dev/null @@ -1,78 +0,0 @@ -# 应用扩缩容 - -Server 模型中的 schedulingStrategy.resource 属性用于声明应用的业务容器的资源规格,有关资源规格的定义,可以查看 KCL Model 中 [base.pkg.kusion_models.kube.frontend.resource](/docs/reference/model/kusion_models/kube/frontend/resource/doc_resource) 模块的文档。 - -## 1. 准备工作 - -可参考:[部署应用服务/准备工作](./1-deploy-server.md#1-%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C) - -## 2. 扩缩容配置样例 - -通过编辑 schedulingStrategy.resource 的值来设置业务容器的资源规格。 - -有两个方法修改资源规格,一种是修改 resource 表达式中 cpu、memory 的值: - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.frontend.resource as res - -appConfiguration: frontend.Server { - # 修改 resource 表达式中 cpu、memory 的值 - # 原值:schedulingStrategy.resource = "cpu=100m,memory=100Mi,disk=1Gi" - # 新的值(应用扩容): - schedulingStrategy.resource = res.Resource { - cpu = 500m - memory = 500Mi - disk = 1Gi - } -} -``` - -另一种是使用预置的 resource 值替代原值来进行应用扩容: - -```py -import base.pkg.kusion_models.kube.frontend -import base.pkg.kusion_models.kube.templates.resource as res_tpl - -appConfiguration: frontend.Server { - # 使用预置的 resource 值替代原值来进行应用扩容: - # 原值:schedulingStrategy.resource = "cpu=100m,memory=100Mi,disk=1Gi" - # 新的值(应用扩容): - schedulingStrategy.resource = res_tpl.large -} -``` - -上述代码是样例配置,可以根据 SchedulingStrategy 模型定义和实际情况添加自定义配置: - -```py -import base.pkg.kusion_models.kube.frontend.resource as res - -schema SchedulingStrategy: - """ SchedulingStrategy represents scheduling strategy. - - Attributes - ---------- - resource: str | res.Resource, default is "1 有关 KCL 语义相关的详细说明,请参阅[表达式](/reference/lang/lang/spec/expressions.md)。 - -## 1. 准备工作 - -可参考:[部署应用服务/准备工作](./1-deploy-server.md#1-%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C) - -## 2. 
差异化配置样例 - -`base/bask.k` 中 Pod Label 的配置: - -```py -appConfiguration: frontend.Server { - podMetadata.labels = { - if __META_CLUSTER_NAME in ["minikube", "kind"]: - cluster = __META_CLUSTER_NAME - else: - cluster = "other" - } -} -``` - -通过以上 KCL 代码,我们根据配置大库(Konfig)中的魔术变量判断实际部署时的集群名称来选择性的为应用容器中注入标签,来做到被第三方服务识别或者其他目的。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/_category_.json deleted file mode 100644 index 845c71a9..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Kubernetes", - "position": 2 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/index.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/index.md deleted file mode 100644 index 8d6340de..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/guides/working-with-k8s/index.md +++ /dev/null @@ -1 +0,0 @@ -# Kubernetes diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/_category_.json deleted file mode 100644 index 41ba677b..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "简介", - "position": 0 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/intro.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/intro.md deleted file mode 100644 index 7cd07308..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/intro.md +++ /dev/null @@ -1,3 +0,0 @@ -# 简介 - -介绍了 KusionStack 是什么、为何要开发 KusionStack,以及 KusionStack 的使用场景。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/kusion-intro.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/kusion-intro.md deleted file mode 100644 index 14e84cac..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/kusion-intro.md +++ /dev/null @@ -1,151 +0,0 @@ -# KusionStack 简介 - - -## KusionStack 是什么? - -**KusionStack 是开源的云原生可编程技术栈!** - -KusionStack 是一个可编程、高灵活性的应用交付及运维技术栈,灵感源于融合(Fusion)一词,旨在帮助企业构建的应用运维配置管理平面及 DevOps 生态。 - -1. 融合**专有云**,**混合云**,**多云**混合场景 -2. 融合以**云原生技术**为主,同时采用**多种平台技术**的混合平台技术选型 -3. 融合**多项目**、**多团队**、**多角色**、**多租户**、**多环境**的企业级诉求 - -基于 Platform as Code (平台服务即代码)理念,研发者可以快速收敛围绕**应用运维生命周期**的全量配置定义,面向**混合技术体系及云环境**,完成从应用运维研发到上线的**端到端工作流程**,真正做到**一处编写,随处交付**。 - -![](/img/docs/user_docs/intro/kusion-stack-0.png) - -KusionStack 源于蚂蚁集团内部的规模化工程化运维实践,已广泛应用在蚂蚁多云应用交付运维,计算及数据基础设施交付,建站运维,数据库运维等多个业务领域,助力蚂蚁完成从传统运维管理模式到可编程 DevOps 运维模式的转型。 - - -## 为什么开发 KusionStack? 
- -以 Kubernetes 为代表的云原生技术正成为现代企业及云厂商的一方技术,并快速形成面向全球的社区生态。Kubernetes API 支持面向终态、数据化(as Data)的定义方式,声明式、版本化、面向资源的可扩展 API,可终止、可预测的服务端模拟验证机制,这些优秀的设计使其天然具有一致的接入方式,完善的资源定义和扩展方式,以及客户端友好的验证、执行机制,原生具备了传统三方 API 接入层技术产品的典型能力。但从另一个角度看,现代应用也将同时长期依赖未云原生化的 PaaS、IaaS 层服务,这使得企业内应用交付运维依赖分散割裂的技术和平台,复杂混乱的工作界面和流程。此外,面向大量的非专家型应用研发者,Kubernetes 自身的技术复杂性和大量面向资源的使用界面制约了在企业内部以开放、民主方式建设更为广泛的 DevOps 生态。最后,平台侧缺少有效手段让大量平台能力简单、可控地开放给应用研发者,通过灵活的高抽象度降低研发者参与运维工作的认知负担,使其自助完成业务交付运维的研发及操作。 - -在蚂蚁我们经历了以上这些问题,我们构建了基于[裸金属](https://en.wikipedia.org/wiki/Bare-metal_server)的超大规模 Kubernetes 集群,基于 [MOSN](https://github.com/mosn/mosn) sidecar 的规模化 mesh 体系,同时也大量依赖中间件等 PaaS 服务及部分 IaaS 云服务。在运维侧,我们做了很多尝试来适应复杂性和快速变化,如加入云原生元素改良经典运维平台,如采用 Kubernetes 社区原生的 YAML-based DevOps 运维方案,但都无法满足内部以应用为中心的高效率、低感知,并兼顾开放性、灵活性、可扩展性、可移植性的规模化交付运维需求。 - - -![](/img/docs/user_docs/intro/kusion-stack-1.png) - - -## 为什么使用 KusionStack? - -KusionStack 通过 engineering(工程化)的理念和技术栈融合平台方,研发者和 [SRE](https://en.wikipedia.org/wiki/Site_reliability_engineering),让平台方更简单灵活的开放平台能力,让应用研发者以应用为中心工作,同时降低研发者在参与运维过程中对基础设施的认知负担,同时又赋予研发者充分的灵活性。总的来说,KusionStack 致力于构建**以应用为中心的抽象界面**,**一致的管理工具及自动化支持**和**更简单的使用体验和工作流程**,并希望趋近这样的理性状态: - -+ 融合**平台技术**:面向大量云原生及经典平台技术及服务,在差异化的 Platform API、IaaS API 层之上形成应用运维生命周期配置定义,并结合镜像机制,使得应用可以一处编写,随处交付 -+ 融合**多种角色**:助力企业内的各平台团队、应用研发团队打破边界,各自向前,通过“平台方生成配置组件,应用方定义应用配置”的工程化方式分工协同,共同形成围绕应用价值交付的 Devops 体系 -+ 融合**工作流程**:面向多样化的运维场景,通过可扩展的工程结构和一致的研发技术栈提供持续的业务研发扩展性,结合 GitOps、可配置 CI、CD、CDRA 技术形成端到端的工作流程 -+ 融合**运维方案**:以工具链、自定义流水线、服务层、GUI 产品形成梯度运维方案,按需供给,灵活取用,兼顾内部专有云及外部混合云、多云场景需求,以弹性运维方案逐渐汰换割裂的 "烟囱" 式产品 -+ 融合**技术理念**:通过开放的运维理念、文化及开源技术在面对多样化的技术和角色创造更多可能性 - - -## KusionStack 技术栈的组成 - -KusionStack 通过 engineering(工程化)的理念,技术和工作流程融合平台方和应用方,以达到平台能力开放和自助运维效率的平衡。KusionStack 以专用语言和工具链为原点,构建了可编程、可扩展、可移植的运维技术栈,其核心组件包括: - -+ [KCL](https://github.com/KusionStack/KCLVM):面向应用研发者的配置策略专用高级编程语言,及其协议组,工具链及 IDE 插件 -+ [Kusion](https://github.com/KusionStack/kusion):运维引擎、工具链、服务层,IDE 工作空间及社区技术集成套件 -+ [Konfig](https://github.com/KusionStack/konfig):应用配置及基础模型共享仓库,及面向 GitOps 工作流程(如 GitHub Actions)的自定义 CI 套件 - -也可以从语言协议层、运维能力层、用户界面层的视角进行划分,如下图所示: - -![](/img/docs/user_docs/intro/kusion-arch.png) - - -## KusionStack 核心特征 - -**灵活组织,按需建模** - -KusionStack 采用纯客户端的工作方式,通过目录、文件的简单工程方式,基于 **project**、**stack** 设计的可扩展工程结构,支持按需组织的灵活管理方式。研发者可以通过记录及函数语言 KCL 编写**配置(config)**、**类型(schema)**、**函数(lambda)**及**规则(rule)**,通过分块配置编写及丰富的合并策略满足企业内多租户、多环境的配置编写需求,灵活应对复杂场景和快速变化。KusionStack 同时提供了开箱即用的云原生应用配置模型,支持用户快速开始云原生交付运维之旅。此外,KusionStack 不局限于的单一、固定模型或标准,研发者可以通过 KCL 模块化的语言特性构建模型组件和应用配置模型,通过 “搭积木” 的方式帮助平台方快速透出平台能力,应用方按需定义应用配置模型。 - -**一处编写,随处交付** - -KusionStack 帮助应用研发者集中收敛围绕应用的全量配置定义,并通过 "容器镜像 + KCL 代码" 的方式将应用交付到专有云、混合云、多云环境。研发者面向应用维度的抽象配置工作,并通过面向不同运行时不同平台的 renders 解释并生成面向平台的低维度资源配置。此外,通过 Kusion 运维引擎管理含 Kubernetes 在内的多种运行时的 hybrid-resource,原生支持多集群的 Kubernetes 资源管理,并通过集成 Terraform 管理 non-Kubernetes 资源。最后,KusionStack 原生支持命令行执行、GitOps 工作流、服务(待开源)调用、GUI 产品(待开源)等自动化机制,通过灵活的自动化方案满足任意交付需求。 - -**企业实践,生态集成** - -依托蚂蚁内部的实践积累,KusionStack 提供了面向 Platform API 从研发态到交付运维态的端到端工作流程: - -1. 平台集成:通过 KCL-OpenAPI 工具自助生成 KCL schema 代码 -2. 研发辅助:通过 KCL IDE 插件,lint,vet,compile,test 工具快速研发、测试 -3. CI 流程:通过 KCL dep,test 工具实现精确依赖分析及自动化集成测试验证 -4. CD 执行:通过 KusionCtl 工具实现身份验证、RBAC 权限配置,变更预览、生效、观察、销毁的执行流程 - -此外,KCL 提供了 CRUD API,多语言 SDK 及 plugin 动态扩展机制,以满足企业内个性化自动化需求。KusionStack 将持续提升运维工具及引擎扩展性,并与更多的社区技术集成。 - - -## KusionStack vs. 
X - -KusionStack 是一个纯客户端的云原生亲和的可编程运维技术栈,旨在定义以应用为中心的抽象界面及管理机制。相比其他技术,其特点可以总结为: - -+ **以应用为中心**: 满足差异化应用配置抽象、定义需求 -+ **纯客户端方案**: 轻量级、高可扩展性、灵活性、可移植性,前置于运行时左移的稳定性保证 -+ **混合资源及云环境**: 满足多云多运行时的资源抽象与管理功能 -+ **高度可自动化**: 提供丰富的 API、SDK、扩展机制满足面向研发者和平台的自动化需求 - -![](/img/docs/user_docs/intro/kusion-vs-x.png) - -其中 KCL 是一种现代高级编程语言,相比于面向运维人员的声明式语言,KCL 面向有编程能力的研发人员。KCL 是编译型静态强类型语言,通过记录及函数语言设计为研发者提供配置(config)、建模抽象(schema)、函数逻辑(lambda)、环境规则(rule)为核心元素的编写能力。 - -KCL 提供运行时无关的研发能力,不原生提供线程、IO 等系统功能,支持面向云原生运维场景的语言功能,为解决领域问题提供稳定、安全、低噪音、低副作用、易于自动化、易于治理的编程支持。 - -不同于通用语言编写的客户端运行时,KCL 通常编译运行并产生 low-level 数据并集成到运行时访问工具(如 KusionCtl),在推进到运行时前通过独立的 KCL 代码测试验证保证左移的稳定性。此外,KCL 还可以被编译为 wasm 模块,在通过重复测试后被服务端运行时集成。 - -![](/img/docs/user_docs/intro/kcl.png) - -KusionStack 提供与 KCL 完全解耦的运维引擎及 API 层,其面向混合资源工作。Kusion 引擎原生支持对 Kubernetes API machinery 能力的充分利用,如支持基于 3-way diff 的 preview,运行时 dry-run 等必要的云原生管理能力等。对于非 Kubernetes 控制面的服务(如 IaaS 资源),KusionStack 通过集成 Terraform 工具链完成自动化管理,将 Terraform 视为一种运行时资源 provision 工具。 - -![](/img/docs/user_docs/intro/kusion-engine.png) - - -**vs. Terraform** - -Terraform 是一种广泛应用在云资源交付场景的可编程运维产品,以动态解释型语言 HCL 编写的配置块为入口,解释并驱动运维引擎及 Provider 框架工作,以其特有的 API 接入机制降低了云厂商参差的命令式 API 的使用难度,结合简洁的工作流程,提供良好的声明式运维体验。 - -相比于面向运维人员的领域语言,KCL 是一种为有编程能力的应用研发者设计的现代编程语言,旨在编写围绕应用的建模抽象、业务逻辑和环境规则。KusionStack 原生支持对Kubernetes API machinery 能力的完整使用,无需额外编写 providers。KusionStack 使用 Terraform 管理非 Kubernetes 资源。KusionStack 更适用于规模化场景,提供丰富的规模化场景功能支持和更快的自动执行效率。 - -**vs. Pulumi** - -Pulumi 是一种组合了通用编程语言 SDK 和 Terraform 技术框架的可编程技术栈。在编程能力上 Pulumi 提供了设计良好的通用语言编写的多语言客户端 SDK,同时完全实现了类 Terraform 的引擎和 Provider 框架。 - -基于经典的 C/S 模式,Pulumi 提供了一种对客户端运行时的研发机制,由客户端运行时直接访问服务端,并通过自研引擎转换并复用 Terraform Providers 生态。相比而言,KusionStack 提供了一种独立于运行时的编写机制,提供了更好的客户端稳定性和 Kubernetes API 亲和性。Pulumi 免去了语言学习成本,但通用语言功能过强,噪音、副作用、稳定性问题、安全性问题和治理成本是难以避免的问题;而 KusionStack 面向领域的语言、技术栈和实践方式,有一定的学习成本,更适用于在规模化问题场景中使用。 - -**vs. CD 系统(如 KubeVela, ArgoCD)** - -CD 系统通常以某种定义方式为源头,通过 Pull,Push 或两者结合的方式完成自动化的集群部署交付和配置漂移调和。Kusion 引擎可以视为一种在 push 模式下工作的 CD 引擎。如果您已采纳了 CD 系统,KusionStack 可以与其配合使用,如通过 ArgoCD 调和生效 KCL 定义,如将 KusionCtl 与 KubeVela 配合使用等。 - -**vs. Helm** - -Helm 理念源于操作系统的包管理机制,是一种基于模板化 YAML 的包管理机制,并支持对包内资源的生效和管理。KusionStack 提供了 Helm 能力的超集,对于已采纳 Helm 的用户,可以将 KusionStack 中 Stack 编译结果以 Helm 格式打包后使用。 - -**vs. OAM** - -OAM 是一种开源开放的应用模型规范,主要应用在云原生 CD 控制面 KubeVela,以云原生技术 CRD,operator 为载体,并支持以 payload 方式承载任意自定义资源。KusionStack 提供了开箱即用的应用模型,亦可以成为一种技术载体在客户端完成 OAM 模型定义,并与 KubeVela 结合使用。 - -**vs. CrossPlane** - -CrossPlane 是一种开源的云原生控制面框架,以 Kubernetes API machinery 规范的扩展方式和技术手段对命令式 API 做重定义、调和和管理。相比而言,在 API 接入方式上 KusionStack 采用了客户端方式提供统一的资源抽象,原生提供了面向 Kubernetes API machinery 的工作机制,通过集成并复用 Terraform 能力完成对非云原生资源的管理,不强求 Kubernetes API 层面的重定义,降低在规模场景下对 Kubernetes API 扩展机制的压力。由于 KusionStack 原生支持更好的与 Kubernetes API Server 配合工作,所以可以与 CrossPlane 无缝配合使用。 - -**vs. Kubernetes** - -Kubernetes 是一种在世界范围内广泛采用的容器调度与管理运行时,一种面向容器集群的“操作系统”。面向 Kubernetes API,KusionStack 旨在为多种运行提供**抽象**,**管理**方式和更好的**使用体验**和**工作流程**,并提供了诸多能力帮助研发者更简单的完成应用交付运维: - -+ KCL:向下兼容 YAML 规范,可将 YAML 作为 KCL 的一种下游静态数据格式 -+ KCL-OpenAPI 工具:原生 OpenAPI,CRD 规范支持,支持自动生成 KCL schema 代码 -+ Konfig 仓库:提供 Kubernetes,prometheus 等常用模型,并提供面向应用的抽象模型库 -+ Kusion:提供面向 Kubernetes 的登录、RABC、敏感信息管理、3-way 预览、执行、观察、销毁等常用工作流支持 - -**vs. 
App PaaS** - -相比于应用部署运维平台或产品,KusionStack 是一种客户端的可编程运维技术栈,提供了技术组件,自动化支持和推荐的工作流程,基于在蚂蚁内部的大量实践满足企业级的开放运维诉求。KusionStack 可以成为应用运维平台的基础技术,其中 Konfig 仓库可以成为其可编程界面和业务层载体。 - - -## 开发状态 - -KusionStack 处于开源早期,你可以在这里看到 [Kusion](https://github.com/KusionStack/kusion/releases) 和 [KCLVM](https://github.com/KusionStack/KCLVM/releases) 的发布版本,也可以通过 [社区](https://github.com/KusionStack/community) 加入我们。 - - -## 下一步 - -+ [安装使用 KusionCtl](/docs/user_docs/getting-started/install/kusionup) -+ 了解[核心概念](/docs/user_docs/concepts/konfig)和[技术架构](/docs/user_docs/concepts/arch) \ No newline at end of file diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/vs-x.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/vs-x.md deleted file mode 100644 index 36db9cbf..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/intro/vs-x.md +++ /dev/null @@ -1,257 +0,0 @@ -# KusionStack 与社区工具异同 - -## 1. Terraform - -[Terraform](https://learn.hashicorp.com/terraform) 是基础设施代码化领域最成功的案例之一,技术产品简单易用,推广度非常好,拥有庞大的社区生态。Terraform 用户常通过 HCL 编写 IaaS 资源规格,通过 provider 框架接入不同的云厂商,并以此种方式完成对 Kubernetes 的接入支持。Terraform 通过 Terraform Enterprise 提供满足企业级需求的 SaaS 服务。​ - -首先,Kusion 自身定位与 Terraform 的关键差异在于 Kusion 核心定位解决规模化云原生运维(PaaS 领域)场景的问题,PaaS 场景下**业务逻辑更复杂更易出错**,更多的**自动化场景**,参与**编写人员更多**,对**多租多环境多集群**支持的需求更强烈,这些特点导致 Kusion 在语言设计、工程结构设计、框架设计上与 Terraform 有较大的差异。 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|  | Kusion | Terraform |
| --- | --- | --- |
| 领域语言 | • 静态类型、强校验规则、可测试<br />• 外置类型结构定义,支持继承<br />• 声明式同名配置数据自动合并机制<br />• 代码自动化修改、查询机制 | • HCL 重点为 JSON 动态化、语言化,无类型、无校验能力<br />• 缺少代码复用及团队协同编写支持<br />• 缺少语言级自动化能力支持 |
| 工程结构 | • 支持面向 PaaS 领域多项目、多环境、多集群的工程结构支持 |  |
| Kubernetes 支持 | • 根据Kubernetes Declarative,Event,Reconcile的特点构建Workflow,而不是简单的CRUD(开发中)<br />• 100%兼容Kubernetes原生API,包括Patch、Watch等(开发中)<br />• 支持Kubernetes CRD<br />• 支持资源发布顺序声明、变量动态引用<br />• 提供专用的 kusion Kubernetes clients, kube2kcl 等工具 | • 通过经典 provider 机制对接,限制了Kubernetes原生接口的能力<br />• 支持多资源顺序声明 |
| 策略支持 | • Kusion 原生支持策略定义 | • 策略通过 sentinel 语言支持,不开源 |
- -以下示例为 KCL 编写的 Kubernetes Deployment 示例,其中数据定义将在编译时被 v1.Deployment 结构及校验规则校验: - -```py -deployment = v1.Deployment { - metadata.name = "nginx−deployment" - labels.app = "nginx" - spec = { - replicas = 3 - selector.matchLabels.app = "nginx" - template = { - metadata.labels.app = "nginx" - spec.containers = [{ - name = "nginx" - image = "nginx:1.14.2" - ports = [{ containerPort = 80 }] - }] - } - } -} -``` - -相对应的 TF 表示无直接校验能力,代码示例如下: - -```Terraform -resource "kubernetes_deployment" "deployment" { - metadata { - name = "nginx-deployment" - labels = { - "app" = "ngnix" - } - } - spec { - replicas = 3 - selector { - match_labels = { - app = "nginx" - } - } - template { - metadata { - labels = { - app = "nginx" - } - } - spec { - container { - image = "nginx:1.14.2" - name = "nginx" - port { - container_port = 80 - } - } - } - } - } -} -``` - -​ -Kusion 针对非 Kubernetes 的三方场景(以 IaaS 为主)提供了 provider 框架支持,并兼容 Terraform 格式的存量 providers。通过这样的技术手段,Terraform 用户可以比较简单的使用 Kusion。 -​ - -最后,Kusion 同时可以使用在云原生策略(Policy as Code)场景,Terraform 并未在开源版做策略支持,在 Terraform Cloud,Terraform Enterprise 中进行了支持。 - -## 2. Pulumi - -[Pulumi](https://www.pulumi.com/) 是近几年快速发展基础设施即代码平台,通过通用编程语言的 SDK 方式配置、描述基础设施,提供了一种便利的调配、更新和管理方式。Pulumi 通过 Pulumi Team,Pulumi Enterprise 提供差异化的企业级 SaaS 服务。 -​ - -Pulumi 相比 Terraform 最大的区别和特点在于 Pulumi 采用了基于高级编程语言提供专用 SDK 的方式来描述技术设施,支持的语言包括 TypeScript,JavaScript,Python,Go 和 .NET 等,用户可以选择他们熟悉的语言进行配置描述,这降低了上手门槛,同时也让配置编写有了极大的灵活性。配置描述的场景与 Terraform 类似,以单一资源描述为主,在简单的、小规模的场景有快速启动优势。在对接多种云服务方面,Pulumi 兼容 Terraform 的 Provider 生态,支持对 Terraform 存量 providers 的无缝接入。 -​ - -Pulumi 与 Kusion 的主要差异在于语言选择和云原生支持方面。 -​ - -通用编程语言通常是多编程范式复合能力的,对于配置编写来说语言表达过强、噪音过大、随意性难以**约束**,对于有一定用户规模的编写场景难以做到统一的、长期的编写范式约束和代码**治理**,其次通用编程语言在编写**稳定**性(类型安全约束、内存管理、校验能力完全依赖通用语言能力)、**安全**性(无法限制系统调用、网络访问等)等方面缺少保障,同时难以支持语言级的**自动化**(修改、查询)以满足企业内大量的自动化功能场景。此外,Pulumi 在策略支持方面较为单薄,这也受限于通用编程语言的选型。 -​ - -其次,Pulumi 对 Kubernetes 的支持方式与 Terraform 相同,将 Kubernetes 作为一种 IaaS 云服务商,不同与 Kusion 将 Kubernetes 作为一等公民的设计思路。 -​ - -这两个主要区别导致了 Kusion 在语言设计、工程结构设计、框架设计上与 Pulumi 有较大的差异。 -​ - - - - - - - - - - - - - - - - - - - - - - - - -
|  | Kusion | Pulumi |
| --- | --- | --- |
| 领域语言 | • 静态类型、强校验规则、可测试<br />• 外置类型结构定义,支持继承<br />• 声明式同名配置数据自动合并机制<br />• 代码自动化修改、查询机制<br />• 策略定义支持 | • 多种高级编程语言<br />• 无语言级别的编程范式约束<br />• 编写能力过强,噪音过大<br />• 稳定性、安全性没有保障<br />• 缺少语言级自动化能力支持<br />• 缺少策略能力支持 |
| 工程结构 | • 支持面向 PaaS 领域多项目、多环境、多集群的工程结构支持 | • 有项目、环境的考虑,但无明确的工程架构设计 |
| Kubernetes 支持 | • 根据Kubernetes Declarative,Event,Reconcile的特点构建Workflow,而不是简单的CRUD(开发中)<br />• 100%兼容Kubernetes原生API,包括Patch、Watch等(开发中)<br />• 支持Kubernetes CRD<br />• 支持资源发布顺序声明、变量动态引用<br />• 提供专用的 kusion Kubernetes clients, kube2kcl 等工具 | • 通过经典 provider 机制对接,限制了Kubernetes原生接口的能力<br />• 支持多资源顺序声明 |
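为直观对照上表「领域语言」一行,下面给出一个用 Pulumi TypeScript SDK 声明 nginx Deployment 的最小示意,可与上文 Terraform 小节中的 KCL / HCL 示例对照阅读。该示意仅作说明:假设已安装真实存在的 `@pulumi/kubernetes` 包,资源名称、镜像与副本数沿用上文示例的取值,并非本文原有内容。

```typescript
import * as k8s from "@pulumi/kubernetes";

// 示例标签,与上文 KCL / HCL 示例保持一致
const appLabels = { app: "nginx" };

// 通过通用编程语言(TypeScript)描述一个 nginx Deployment
const deployment = new k8s.apps.v1.Deployment("nginx-deployment", {
    metadata: { name: "nginx-deployment", labels: appLabels },
    spec: {
        replicas: 3,
        selector: { matchLabels: appLabels },
        template: {
            metadata: { labels: appLabels },
            spec: {
                containers: [
                    { name: "nginx", image: "nginx:1.14.2", ports: [{ containerPort: 80 }] },
                ],
            },
        },
    },
});

// 导出 Deployment 名称,便于在 pulumi up 之后查看
export const deploymentName = deployment.metadata.name;
```

如正文所述,这类写法上手门槛低、灵活性高,但配置的约束、稳定性与可治理性需要依赖通用语言自身的工程实践来保证。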
-​ - -## 3. KCL 与其他配置语言的对比 - -在大规模高度可配置场景下,如何让每个人能**编写**、**测试**、**组织**配置代码成为一个新的问题,从配置系统的发展来看,起初是零散在各业务代码中的 key-value 对配置,随后通过模板语言进行增强,之后把点状的业务系统性的提炼形成完整的可扩展抽象体系,最终发展成高度可配置的语言系统及对应的技术工具生态。但是面对云原生基础设施运维等大规模配置策略场景,多数系统都是围绕 JSON 或 YAML 语言设计或者多种语言、脚本、领域语言的技术拼盘,协同研发、自动化管理与治理困难,容易出错且效率不高。 - -KCL 旨在通过语言化的技术与统一的编程界面管理大规模配置与策略,通过版本化、可移植、可复制的代码适配多云、多基础设施、多运行时、多环境的运维诉求,提升大规模配置策略场景下的协同运维效率;同时通过语言层机制约束编写范式、防止编写出错,提升稳定性。因此相比于其他配置语言,KCL 针对大规模的配置策略场景进行了增强,下表显示了 KCL 特性与其他配置语言特性的比较结果: - -| | KCL | GCL | CUE | Jsonnet | HCL | Dhall | -| ----------- | ------ | ------ | ------ | ------- | ------ | ------ | -| 变量 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| 引用 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| 数据类型 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| 算术&逻辑 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| 循环 | 推导式 | 推导式 | 推导式 | 推导式 | 推导式 | 生成式 | -| 条件 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| 内置函数 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| 函数定义 | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | -| 模块/包导入 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| 类型检查 | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | -| 测试 | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | -| 结构定义 | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | -| 继承 | schema | tuple | ❌ | object | data | data | -| 数据集成 | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | -| 配置合并 | ✅ | ❌ | ✅ | ✅ | ✅ | ❌ | -| 动态配置 | ✅ | ❌ | ❌ | ✅ | ❌ | ❌ | -| 策略编写 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | - -可以看出几乎所有配置语言都支持变量、引用、数据类型、算术、逻辑、条件、内置函数和导入等特性,遵循声明性原则。这些语言对函数定义和循环提供最少的支持或不提供支持。 例如,大多数语言只提供一个推导表达式来构造一个列表和一个字典,而不是一个 for/while 语句。 KCL 和其他一些语言一样提供了用于定义抽象配置的数据类型 schema。不同之处在于 KCL 支持 schema 继承,而大多数配置语言是直接基于结构化数据进行继承或合并的。此外,KCL 在**强约束**、**协同编写**、**自动化**、**高性能**和**策略编写**等方面也进行了增强,以更好地方便开发者使用更多的**工程特性**进行**高效稳定的协作**。 - -* **强约束**:KCL 代码基于用户角色划分并保证语义简单,配置类型和配置数据是分离定义的,允许为变量/属性定义类型。 KCL 常用的基本数据类型包括`bool`、`int`、`float`、`string`、`list` 和 `dict`,KCL 还提供了结构类型、联合类型、字面值类型等。KCL 支持静态编译时类型分析、检查和推断,有助于提高配置代码的健壮性。同时 KCL 支持变量的默认不可变性与声明式校验,满足不同条件的约束。此外 KCL 还支持使用测试工具完成对自身代码的测试,能够很好地检测不同代码片段的运行情况,进行输入输出结果比对,保证了代码的稳定性; -* **协同编写**:在 KCL 中提供了模块化的能力,KCL 的代码文件以包(目录)和模块(文件)的形式进行管理。此外在 KCL 中分别提供了配置定义、配置代换和配置自动合并的能力。通过这样的方式,能够很好地检测处出不同开发人员对同一份配置操作冲突的问题,并且能够极大地实现配置复用,在保证稳定性的情况下能够很好地提升协同效率; -* **自动化**:在 KCL 中提供了很多自动化相关的能力,主要包括工具和多语言 API。 通过统一的模式支持对任意配置键值的索引,从而完成对任意键值的增删改查,方便上层自动化系统集成,提升配置自动处理的效率; -* **高性能**:配合 LLVM 优化器,KCL 支持编译到本地代码和 WASM 代码并高效执行; -* **策略编写**:在 KCL 中支持通过 rule 关键字定义相应的规则,并且支持规则的复用与自由组装,通过编写策略代码,能够帮助开发人员快速实现数据查询过滤等功能。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/_category_.json b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/_category_.json deleted file mode 100644 index 9af634a7..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "常见问题", - "position": 6 -} diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/_faq-concepts.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/_faq-concepts.md deleted file mode 100644 index 82494eaf..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/_faq-concepts.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -sidebar_position: 1 ---- - -# 概念 & 定义 - -## 1. Kusion - -F**usion** Cloud Native on **K**ubernetes. Kusion 一词来源于 fusion(意为融合) + kubernetes,是基于云原生基础设施,通过定义云原生可编程接入层,提供包括配置语言、模型界面、自动化工具、最佳实践在内的一整套解决方案,连通云原生基础设施与业务应用,连接定义和使用基础设施的各个团队,串联应用生命周期的研发、测试、集成、发布各个阶段,服务于云原生自动化系统建设,加速云原生落地。我们平时提到 Kusion,一般是对这一整套解决方案的统称;而 Kusion 生态工具链则包含了 kcl 命令行工具、KusionCtl 命令行工具、KCL IDE 插件等贯穿 Kusion 解决方案各个部分的自动化工具。 - - -## 2. 大写的 KCL 语言 - -**K**usion **C**onfiguration **L**anguage. 
是由云原生工程化系统团队设计和研发的**专用于配置定义、校验的动态强类型配置语言**,重点服务于配置(configuration)& 策略(policy programing)场景,以服务云原生配置系统为设计目标,但作为一种配置语言不限于云原生领域。KCL 吸收了声明式、OOP 编程范式的理念设计,针对云原生配置场景进行了大量优化和功能增强。KCL 最初受 Python3 脚本语言启发,依托 Python 的语言生态,目前已经发展为独立的面向配置策略的语言。 - -## 3. 小写的 kcl 命令 - -[kcl](/docs/reference/cli/kcl/overview) 命令行工具。一般使用全大写字母的 KCL 代指 KCL 语言,而用全小写的 kcl 代指能将 KCL 代码编译为低层次数据输出(如 YAML, JSON 等)的 kcl 命令行工具。 - -## 4. KCLVM - -**V**irtual **M**achine to parse and compile KCL。指开发 kcl 命令行工具的工程项目,也是 kcl 命令行工具的代码仓库名称,KCLVM 使用 Python、Rust 等多种语言混合开发。 - -## 5. KusionCtl - -Kusion Kubernetes Client。[KusionCtl](/docs/reference/cli/kusionctl/overview) 命令行工具旨在简化用户对 K8S 的使用,内置支持对 KCL 的编译、通过登录功能原生支持 Identity 能力,支持多集群访问,提供资源状态汇总及相应的白屏展示、对用户变更模型及其关联模型的变更追踪、链路可视化、live 对比、关键资源可视化、异常定位等功能。 - -## 6. Konfig - -**K**usion **C**onfig. Konfig 是一个 KCL 代码仓库,其中组织了蚂蚁域内各应用的基础设施配置。依据团队协同的层次,Konfig 仓库划分为"基础配置代码"和"业务配置代码"两部分,采用主干开发、分支发布的分支策略。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-cli.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-cli.md deleted file mode 100644 index 63a87444..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-cli.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -sidebar_position: 5 ---- - -# 命令行工具 - -### 1. Konfig 大库应用目录下的 settings.yaml 文件的作用是什么? - -KCL 中 settings.yaml 文件表示 KCL 命令行工具的配置参数文件,可以将编译的配置放入其中进行调用比如需要编译的文件,需要输入的 option 动态参数 `-d`,是否需要忽略掉空值 None `-n` 等配置。 - -比如对于如下的命令行运行参数 - -``` -kcl main.k -D key=value -n -r -``` - -就可以使用如下的命令行参数和 settings.yaml 配置文件代替 - -``` - -kcl -Y settings.yaml -``` - -settings.yaml - -```yaml -kcl_cli_configs: - files: - - main.k - disable_none: true - strict_range_check: true -kcl_options: - - key: key - value: value -``` - -- `kcl_cli_configs` 表示可以配置的编译参数,`file` 用于配置编译的 KCL 文件,`disable_none` 表示是否使用 `-n` 参数,`strict_range_check` 表示是否使用 `-r` 参数。 -- `kcl_options` 表示可以配置的动态参数,`key` 表示动态参数的名称,`value` 表示动态参数的值 - -注意:settings.yaml 的文件名称可替换,只要其中的配置结构满足规定即可 - -### 2. 如何传入动态参数?如何在代码中获取命令行传入的动态参数? - -KCL 支持多种方式传入动态参数 - -- `-D`: 使用 KCL 命令行的-D 参数可以直接传入动态参数,支持基本数据类型 str/int/float/bool, 以及结构数据类型 list/dict - -``` -kcl main.k -D env-type=TEST -D deploy-topology='[{"cluster":"sigma-eu126-mybank-staging","idc":"cn-hangzhou-test-eu126","replicas":2,"workspace":"middlewarehzcloudsit","zone":"CellAEU126"}]' -``` - -- `-Y`: 使用 KCL 命令行的-Y 参数可以间接通过配置文件传入动态参数: - -```yaml -kcl_options: -- key: env-type - value: TEST -- key: deploy-topology - value: - - cluster: sigma-eu126-mybank-staging - idc: cn-hangzhou-test-eu126 - replicas: 2 - workspace: middlewarehzcloudsit - zone: CellAEU126 -``` - -在代码中使用内置的 option 函数获取即可 - -```python -env = option("env-type") -deploy_topology = option("deploy-topology") -``` - -输出 YAML - -```yaml -env: TEST -deploy_topology: -- cluster: sigma-eu126-mybank-staging - idc: cn-hangzhou-test-eu126 - replicas: 2 - workspace: middlewarehzcloudsit - zone: CellAEU126 -``` - -### 3. 如何使用 kcl 的多文件编译特性? - -- 使用 KCL 命令行工具直接书写多文件编译 - -``` -kcl file1.k file2.k file3.k -``` - -- 在配置文件中配置并配合 KCL 命令行工具参数 `-Y` 使用 - -settings.yaml - -```yaml -kcl_cli_configs: - files: - - file1.k - - file2.k - - file3.k -``` - -``` -kcl -Y settings.yaml -``` - -### 4. Konfig 大库应用目录下的 stack.yaml 文件的定位是什么? 
- -Stack 是项目中的一个隔离的逻辑工作区。Stack 唯一地属于一个开发组,例如 Web 项目中的前端开发组,并且唯一表示特定的开发阶段,例如开发、测试、生产。从开发的角度看,Stack 是 Kusion 项目的基本配置单元。从执行的角度来看,KCL 代码单元被部署到一个 Stack 中。 diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-kcl.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-kcl.md deleted file mode 100644 index 4cce3316..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-kcl.md +++ /dev/null @@ -1,2291 +0,0 @@ ---- -sidebar_position: 2 ---- - -# KCL 语法 - -## 1. 如何用 KCL 写一个简单的 key-value 对配置 - -创建一个名为 `config.k` 的文件 - -```python -cpu = 256 -memory = 512 -image = "nginx:1.14.2" -service = "my-service" -``` - -上述 KCL 代码中,定义了 4 个变量 `cpu` 和 `memory` 被声明为整数类型,并且它们的值为 `256` 和 `512`,而 `image` 和 `service` 是字符串类型,它们的值为 `image` 和 `service` - -使用如下命令可以将上述 KCL 文件编译为 YAML 进行输出 - -``` -kcl config.k -``` - -得到的 YAML 输出为: - -```yaml -cpu: 256 -memory: 512 -image: nginx:1.14.2 -service: my-service -``` - -如果想要输出到文件,可以使用 `-o|--output` 参数: - -``` -kcl config.k -o config.yaml -``` - -## 2. KCL 中有哪些基本的数据类型? - -KCL 目前的基本数值类型和值包含: - -- 整数类型 `int` - - 举例: 十进制正整数 `1`, 十进制负整数 `-1`, 十六进制整数 `0x10`, 八进制整数 `0o10`, 二进制整数 `0b10` -- 浮点数类型 `float` - - 举例: 正浮点数 `1.10`, `1.0`, 负浮点数 `-35.59`, `-90.`, 科学记数法浮点数 `32.3e+18`, `70.2E-12` -- 布尔类型 `bool` - - 举例: 真值 `True`, 假值 `False` -- 字符串类型 `str` - 使用引号 `'`, `"` 标记 - - 举例: 双引号字符串 `"string"`, `"""string"""`, 单引号字符串 `'string'`, `'''string'''` -- 列表类型 `list` - 使用 `[`, `]` 标记 - - 举例: 空列表 `[]`, 字符串列表 `["string1", "string2", "string3"]` -- 字典类型 `dict` - 使用 `{`, `}` 标记 - - 举例: 空字典 `{}`, 键值均为字符串类型的字典 `{"key1": "value1", "key2": "value2"}` -- 结构类型 `schema` - 使用关键字 `schema` 定义,并使用相应的 schema 名称进行实例化 -- 空值类型 `None` - 用于表示一个变量的值为空,与输出 YAML 的 `null` 值对应 -- 未定义值类型 `Undefined` - 用于表示一个变量未被赋值,值为 `Undefined` 的变量不会被输出到 YAML 中 - -```python -schema Person: - name: str - age: int - -alice = Person { - name = "Alice" - age = 18 -} -bob = Person { - name = "Bob" - age = 10 -} -``` - -注意: 所有 KCL 类型的变量均可赋值为空值 `None` 和未定义的值 `Undefined` - -## 3. 有些 KCL 变量名带 `_` 下划线前缀表示什么?和不带 `_` 下划线前缀的区别是什么?分别适合什么场景下使用? - -KCL 中带下划线前缀的变量表示一个**隐藏**的,**可变**的变量,**隐藏**表示带下划线前缀的变量不会被输出到 YAML 当中,包括包级别的下划线前缀变量和 schema 当中的下划线前缀变量。**可变**表示带下划线前缀的变量可被多次重复赋值,不带下划线前缀的变量被赋值后不可变。 - -带 `_` 下划线前缀的变量与不带 `_` 下划线前缀变量的区别是: 不带 `_` 下划线前缀变量默认是导出到 YAML 当中的,并且具有强不可变性;带 `_` 下划线前缀变量是不导出的,可变的。 - -```python -name = 'Foo' # 导出变量,不可变变量 -name = 'Bar' # 错误:导出变量只能设置一次 -``` - -```python -_name = 'Foo' # 隐藏变量,可变变量 -_name = 'Bar' - -schema Person: - _name: str # hidden and mutable -``` - -## 4. 如何向 dict 中添加元素? - -可以使用 union 运算符 `|`, 或者 dict 解包运算符 `**` 来向 dict 中添加一个元素,并且可以使用 `in`,`not in` 等关键字判断 dict 变量当中是否包含某一个键值 - -```python -_left = {key = {key1 = "value1"}, intKey = 1} # 注意使用 = 表示覆盖 -_right = {key = {key2 = "value2"}, intKey = 2} -dataUnion = _left | _right # {"key": {"key1": "value1", "key2": "value2"}, "intKey": 2} -dataUnpack = {**_left, **_right} # {"key": {"key1": "value1", "key2": "value2"}, "intKey": 2} -``` - -输出 YAML 为: - -```yaml -dataUnion: - key: - key1: value1 - key2: value2 -dataUnpack: - key: - key2: value2 -``` - -此外还可以使用 `字符串插值` 或者字符串 `format` 成员函数特性向 kcl dict 添加变量键值对 - -```python -dictKey1 = "key1" -dictKey2 = "key2" -data = { - "${dictKey1}" = "value1" - "{}".format(dictKey2) = "value2" -} -``` - -输出 YAML 为: - -```yaml -dictKey1: key1 -dictKey2: key2 -data: - key1: value1 - key2: value2 -``` - -## 5. 如何修改 dict 中的元素? 
- -我们可以使用 union 运算符 `|`, 或者解包运算符 `**` 修改 dict 当中的元素 - -```python -_data = {key = "value"} # {"key": "value"} -_data = _data | {key = "override_value1"} # {"key": "override_value1"} -_data = {**_data, **{key = "override_value2"}} # {"key": "override_value2"} -``` - -如果想要删除 dict 中某个键为 `key` 的值,可以使用解包运算符 `**{key = Undefined}` 或者合并运算符 `| {key = Undefined}` 进行覆盖,覆盖后 key 的值为 Undefined,不会进行 YAML 输出。 - -## 6. 如何向 list 中添加元素? - -在 list 中添加元素有两种方式: - -- 使用 `+`, `+=` 和 slice 切片连接组装 list 变量达到向 list 中添加元素的目的 - -```python -_args = ["a", "b", "c"] -_args += ["end"] # 在list尾部添加元素"end", ["a", "b", "c", "end"] -_args = _args[:2] + ["x"] + _args[2:] # 在list索引为2的地方插入元素"x", ["a", "b", "x", "c", "end"] -_args = ["start"] + _args # 在list头部添加元素"start", ["start", "a", "b", "x", "c", "end"] -``` - -- 使用 `*` 解包运算符连接合并 list - -```python -_args = ["a", "b", "c"] -_args = [*_args, "end"] # 在list尾部添加元素"end", ["a", "b", "c", "end"] -_args = ["start", *_args] # 在list头部添加元素"start", ["start", "a", "b", "c", "end"] -``` - -注意:当接连的变量为 `None/Undefined` 时,使用 `+` 可能会发生错误,这时使用 list 解包运算符 `*` 或者使用 `or` 运算符取 list 的默认值可以避免空值判断 - -```python -data1 = [1, 2, 3] -data2 = None -data3 = [*data1, *data2] # Right [1, 2, 3] -data4 = data1 + data2 or [] # Right [1, 2, 3], 使用 or 取 data2 的默认值为 [], 当 data2 为 None/Undefined 时,取空列表 [] 进行计算 -data5 = data1 + data2 # Error: can only concatenate list (not "NoneType") to list -``` - -## 7. 如何修改/删除 list 中的元素? - -修改 list 中的元素分为两种方式: - -- 直接修改 list 某个索引处的值,使用 slice 切片 - -```python -_index = 1 -_args = ["a", "b", "c"] -_args = _args[:index] + ["x"] + _args[index+1:] # 修改list索引为1的元素为"x", ["a", "x", "c"] -``` - -- 根据某个条件修改 list 当中的元素,使用 list comprehension 列表推导式 - -```python -_args = ["a", "b", "c"] -_args = ["x" if a == "b" else a for a in _args] # 将list当中值为"b"的值都修改为"x", ["a", "x", "c"] -``` - -删除 list 中的元素分为两种方式: - -- 使用 list for 推导表达式中 if 过滤条件 -- 使用 filter 表达式对 list 进行元素过滤 - -比如想要删除一个列表 `[1, 2, 3, 4, 5]` 中大于 2 的数字,则在 KCL 中可以写为: - -```python -originList = [1, 2, 3, 4, 5] -oneWayDeleteListItem = [item for item in originList if item <= 2] -anotherWayDeleteListItem = filter item in originList { - item <= 2 -} -``` - -输出如下结果 - -```yaml -originList: -- 1 -- 2 -- 3 -- 4 -- 5 -oneWayDeleteListItem: -- 1 -- 2 -anotherWayDeleteListItem: -- 1 -- 2 -``` - -## 8. 怎样写 for 循环?怎样理解和使用 list comprehension 列表推导式 和 dict comprehension 字典推导式 ? - -KCL 目前仅支持函数式/声明式的推导式 for 循环方式,可以按照如下方式遍历 dict 和 list 变量: - -list 推导式具体形式为(其中推导式两边使用方括号 `[]`): - -```txt -[expression for expr in sequence1 - if condition1 - for expr2 in sequence2 - if condition2 - for expr3 in sequence3 ... - if condition3 - for exprN in sequenceN - if conditionN] -``` - -dict 推导式具体形式为(其中推导式两边使用花括号 `{}`): - -```txt -{expression for expr in sequence1 - if condition1 - for expr2 in sequence2 - if condition2 - for expr3 in sequence3 ... 
- if condition3 - for exprN in sequenceN - if conditionN} -``` - -上述推导式中的 `if` 表示过滤条件,满足条件的表达式 `expr` 才会生成到新的 list 或 dict 中 - -list 推导式举例: - -```python -_listData = [1, 2, 3, 4, 5, 6] -_listData = [l * 2 for l in _listData] # _listData中所有元素都乘以2,[2, 4, 6, 8, 10, 12] -_listData = [l for l in _listData if l % 4 == 0] # 筛选出_listData中可以被4整除的所有元素,[4, 8, 12] -_listData = [l + 100 if l % 8 == 0 else l for l in _listData] # 遍历_listData, 当其中的元素可以被8整除时,将该元素加100,否则保持不变, [4, 108, 12] -``` - -注意上述代码中第 3 行和第 4 行两个 `if` 的区别: - -- 第一个 `if` 表示 list 变量 `_listData` 本身的推导式过滤条件,后不能跟 `else`,满足该过滤条件的元素会继续放在该列表中,不满足条件的元素被剔除,有可能会使列表长度发生变化 -- 第二个 `if` 表示 list 迭代变量 `l` 的选择条件,表示 `if-else` 三元表达式,后必须跟 `else`,不论是否满足该条件,产生的元素仍然在该列表中,列表长度不变 - -dict 推导式举例: - -```python -_dictData = {key1 = "value1", key2 = "value2"} -_dictData = {k = _dictData[k] for k in _dictData if k == "key1" and _dictData[k] == "value1"} # 将_dictData中key为"key1", value为"value1"的元素筛选出来, {"key1": "value1"} -``` - -使用推导式获得 dict 所有 key: - -```python -dictData = {key1 = "value1", key2 = "value2"} -dictDataKeys = [k for k in _dictData] # ["key1", "key2"] -``` - -使用推导式对 dict 按照 key 的字典序升序进行排序: - -```python -dictData = {key3 = "value3", key2 = "value2", key1 = "value1"} # {'key3': 'value3', 'key2': 'value2', 'key1': 'value1'} -dictSortedData = {k = dictData[k] for k in sorted(dictData)} # {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} -``` - -多级推导式举例: - -```python -array1 = [1, 2, 3] -array2 = [4, 5, 6] -data = [a1 + a2 for a1 in array1 for a2 in array2] # [5, 6, 7, 6, 7, 8, 7, 8, 9] len(data) == len(array1) * len(array2) -``` - -双变量循环(for 推导表达式支持 list 的索引迭代以及 dict 的 value 迭代,可以简化 list/dict 迭代过程代码书写): - -- list - -```python -data = [1000, 2000, 3000] -# 单变量循环 -dataLoop1 = [i * 2 for i in data] # [2000, 4000, 6000] -dataLoop2 = [i for i in data if i == 2000] # [2000] -dataLoop3 = [i if i > 2 else i + 1 for i in data] # [1000, 2000, 3000] -# 双变量循环 -dataLoop4 = [i + v for i, v in data] # [1000, 2001, 3002] -dataLoop5 = [v for i, v in data if v == 2000] # [2000] -# 使用_忽略循环变量 -dataLoop6 = [v if v > 2000 else v + i for i, v in data] # [1000, 2001, 3000] -dataLoop7 = [i for i, _ in data] # [0, 1, 2] -dataLoop8 = [v for _, v in data if v == 2000] # [2000] -``` - -- dict - -```python -data = {key1 = "value1", key2 = "value2"} -# 单变量循环 -dataKeys1 = [k for k in data] # ["key1", "key2"] -dataValues1 = [data[k] for k in data] # ["value1", "value2"] -# 双变量循环 -dataKeys2 = [k for k, v in data] # ["key1", "key2"] -dataValues2 = [v for k, v in data] # ["value1", "value2"] -dataFilter = {k = v for k, v in data if k == "key1" and v == "value1"} # {"key1": "value1"} -# 使用_忽略循环变量 -dataKeys3 = [k for k, _ in data] # ["key1", "key2"] -dataValues3 = [v for _, v in data] # ["value1", "value2"] -``` - -## 9. 怎样写 if 条件语句? - -KCL 支持两种方式书写 if 条件语句: - -- if-elif-else 块语句,其中 elif 和 else 块均可省略,并且 elif 块可以使用多次 - -```python -success = True -_result = "failed" -if success: - _result = "success" -``` - -```python -success = True -if success: - _result = "success" -else: - _result = "failed" -``` - -```python -_result = 0 -if condition == "one": - _result = 1 -elif condition == "two": - _result = 2 -elif condition == "three": - _result = 3 -else: - _result = 4 -``` - -- 条件表达式 ` if else `, 类似于 C 语言当中的 ` ? 
: ` 三元表达式 - -```python -success = True -_result = "success" if success else "failed" -``` - -注意:在书写 if-elif-else 块语句时注意书写 if 条件后的冒号 `:` 以及保持缩进的统一 - -除此之外,还可以在 list 或者 dict 结构中直接书写条件表达式(不同的是,在结构中书写的 if 表达式中需要书写的值而不是语句): - -- list - -```python -env = "prod" -data = [ - "env_value" - ":" - if env == "prod": - "prod" # 书写需要添加到 data 中的值,而不是语句 - else: - "other_prod" -] # ["env_value", ":", "prod"] -``` - -- dict - -```python -env = "prod" -config = { - if env == "prod": - MY_PROD_ENV = "prod_value" # 书写需要添加到 config 中的键-值对,而不是语句 - else: - OTHER_ENV = "other_value" -} # {"MY_PROD_ENV": "prod_value"} -``` - -## 10. 怎样表达 "与" "或" "非" 等逻辑运算? - -在 KCL 中,使用 `and` 表示"逻辑与", 使用 `or` 表示"逻辑或", 使用 `not` 表示"非", 与 C 语言当中的 `&&`, `||` 和 `~` 语义一致; - -```python -done = True -col == 0 -if done and (col == 0 or col == 3): - ok = 1 -``` - -对于整数的"按位与", "按位或"和"按位异或",在 KCL 中使用 `&`, `|` 和 `^` 运算符表示, 与 C 语言当中的 `&`, `|` 和 `^` 语义一致; - -```python -value = 0x22 -bitmask = 0x0f - -assert (value & bitmask) == 0x02 -assert (value & ~bitmask) == 0x20 -assert (value | bitmask) == 0x2f -assert (value ^ bitmask) == 0x2d -``` - -"逻辑或" `or` 的妙用:当需要书写诸如 `A if A else B` 类似的模式时,可以使用 `A or B` 进行简化,比如如下代码: - -```python -value = [0] -default = [1] -x0 = value if value else default -x1 = value or default # 使用 value or default 代替 value if value else default -``` - -## 11. 如何判断变量是否为 None/Undefined、字符串/dict/list 是否为空? - -请注意,在 if 表达式的条件判断中,`False`、`None`、`Undefined`、数字 `0`、空列表 `[]`、空字典 `{}` 和空字符串 `""`, `''`, `""""""`, `''''''` 都被视为值为 `假` 的表达式。 - -比如判断一个字符串变量 `strData` 既不为 `None/Undefined` 也不为空字符串时(字符串长度大于 0),就可以简单的使用如下表达式: - -```python -strData = "value" -if strData: - isEmptyStr = False -``` - -空字典和空列表判断举例: - -```python -_emptyList = [] -_emptyDict = {} -isEmptyList = False if _emptyList else True -isEmptyDict = False if _emptyDict else True -``` - -YAML 输出为: - -```yaml -isEmptyList: true -isEmptyDict: true -``` - -或者使用布尔函数 `bool` 进行判断 - -```python -_emptyList = [] -_emptyDict = {} -isEmptyList = bool(_emptyList) -isEmptyDict = bool(_emptyDict) -``` - -## 12. 字符串怎样拼接、怎样格式化字符串、怎样检查字符串前缀、后缀?怎样替换字符串内容? 
- -- KCL 中可以使用 `+` 运算符连接两个字符串 - -```python -data1 = "string1" + "string2" # "string1string2" -data2 = "string1" + " " + "string2" # "string1 string2" -``` - -- KCL 中目前存在两种格式化字符串的方式: - - 字符串变量的 format 方法 `"{}".format()` - - 字符串插值 `${}` - -```python -hello = "hello" -a = "{} world".format(hello) -b = "${hello} world" -``` - -注意,如果想在 `"{}".format()` 中单独使用 `{` 字符或者 `}`, 则需要使用 `{{` 和 `}}` 分别对 `{` 和 `}` 进行转义,比如转义一个 JSON 字符串如下代码: - -```python -data = "value" -jsonData = '{{"key": "{}"}}'.format(data) -``` - -输出 YAML 为: - -```yaml -data: value -jsonData: '{"key": "value"}' -``` - -注意,如果想在 `${}` 插值字符串中单独使用 `$` 字符,则需要使用 `$$` 对 `$` 进行转义 - -```python -world = "world" -a = "hello {}".format(world) # "hello world" -b = "hello ${world}" # "hello world" -c = "$$hello ${world}$$" # "$hello world$" -c2 = "$" + "hello ${world}" + "$" # "$hello world$" -``` - -输出 YAML 为: - -```yaml -world: world -a: hello world -b: hello world -c: $hello world$ -c2: $hello world$ -``` - -- KCL 中使用字符串的 `startswith` 和 `endswith` 方法检查字符串的前缀和后缀 - -```python -data = "length" -isEndsWith = data.endswith("th") # True -isStartsWith = "length".startswith('len') # True -``` - -- KCL 中使用字符串的 replace 方法或者 regex.replace 函数替换字符串的内容 - -```python -import regex -data1 = "length".replace("len", "xxx") # 使用"xxx"替换"len", "xxxgth" -data2 = regex.replace("abc123", r"\D", "0") # 替换"abc123"中的所有非数字为"0", "000123" -``` - -其中,`r"\D"` 表示不需要使用 `\\` 转义 `\D` 中的反斜杠 `\`,多用于正则表达式字符串中 - -此外,我们可以在字符串格式化表达式中插入索引占位符或者关键字占位符用于格式化多个字符串 - -- 索引占位符 - -```python -x = '{2} {1} {0}'.format('directions', 'the', 'Read') -y = '{0} {0} {0}'.format('string') -``` - -输出为: - -```yaml -x: Read the directions -y: string string string -``` - -- 关键字占位符 - -```python -x = 'a: {a}, b: {b}, c: {c}'.format(a = 1, b = 'Two', c = 12.3) -``` - -输出为: - -```yaml -x: 'a: 1, b: Two, c: 12.3' -``` - -## 13. 字符串中使用单引号和双引号的区别是什么? - -KCL 单引号和双引号字符串几乎没有区别。唯一的区别是,不需要在单引号字符串中使用 `\"` 转义双引号 `"`,不需要在双引号字符串中使用 `\'` 转义单引号引号 `'`。 - -```python -singleQuotedString = 'This is my book named "foo"' # Don’t need to escape double quotes in single quoted strings. -doubleQuotedString = "This is my book named 'foo'" # Don’t need to escape single quotes in double quoted strings. -``` - -此外在 KCL 中,使用三个单引号或者三个双引号组成的长字符串,无需在其中对单引号或者三引号进行转义 (除字符串首尾),比如如下例子: - -```python -longStrWithQuote0 = """Double quotes in long strings "(not at the beginning and end)""" -longStrWithQuote1 = '''Double quotes in long strings "(not at the beginning and end)''' -longStrWithQuote2 = """Single quotes in long strings '(not at the beginning and end)""" -longStrWithQuote3 = '''Single quotes in long strings '(not at the beginning and end)''' -``` - -输出 YAML: - -```yaml -longStrWithQuote0: Double quotes in long strings "(not at the beginning and end) -longStrWithQuote1: Double quotes in long strings "(not at the beginning and end) -longStrWithQuote2: Single quotes in long strings '(not at the beginning and end) -longStrWithQuote3: Single quotes in long strings '(not at the beginning and end) -``` - -## 14. 如何编写跨行的长字符串? 
- -KCL 中可以使用单引号字符串 + 换行符 `\n` 或者三引号字符串书写一个多行字符串,并且可以借助续行符 `\` 优化 KCL 字符串的形式,比如对于如下代码中的三个多行字符串变量,它们的制是相同的: - -```python -string1 = "The first line\nThe second line\nThe third line\n" -string2 = """The first line -The second line -The third line -""" -string3 = """\ -The first line -The second line -The third line -""" # 推荐使用 string3 长字符串的书写形式 -``` - -输出 YAML 为: - -```yaml -string1: | - The first line - The second line - The third line -string2: | - The first line - The second line - The third line -string3: | - The first line - The second line - The third line -``` - -## 15. 如何使用正则表达式? - -通过在 KCL 中导入正则表达式库 `import regex` 即可使用正则表达式,其中包含了如下函数: - -- **match**: 正则表达式匹配函数,根据正则表达式对输入字符串进行匹配,返回 bool 类型表示是否匹配成功 -- **split**: 正则表达式分割函数,根据正则表达式分割字符串,返回分割字串的列表 -- **replace**: 正则表达式替换函数,替换字符串中所有满足正则表达式的子串,返回被替换的字符串 -- **compile**: 正则表达式编译函数,返回 bool 类型表示是否是一个合法的正则表达式 -- **search**: 正则表达式搜索函数,搜索所有满足正则表达式的子串,返回子串的列表 - -使用举例: - -```python -regex_source = "Apple,Google,Baidu,Xiaomi" -regex_split = regex.split(regex_source, ",") -regex_replace = regex.replace(regex_source, ",", "|") -regex_compile = regex.compile("$^") -regex_search = regex.search("aaaa", "a") -regex_find_all = regex.findall("aaaa", "a") -regex_result = regex.match("192.168.0.1", "^(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|[1-9])\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)$") # 判断是否是一个IP字符串 -regex_result_false = regex.match("192.168.0,1", "^(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|[1-9])\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)$") # 判断是否是一个IP字符串 -``` - -输出 YAML: - -```yaml -regex_source: Apple,Google,Baidu,Xiaomi -regex_split: -- Apple -- Google -- Baidu -- Xiaomi -regex_replace: Apple|Google|Baidu|Xiaomi -regex_compile: true -regex_search: true -regex_find_all: -- a -- a -- a -- a -regex_result: true -regex_result_false: false -``` - -对于比较长的正则表达式,还可以使用 r-string 忽略 `\` 符号的转义简化正则表达式字符串的书写: - -```python -isIp = regex.match("192.168.0.1", r"^(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|[1-9])."+r"(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)."+r"(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)."+r"(1\d{2}|2[0-4]\d|25[0-5]|[1-9]\d|\d)$") # 判断是否是一个IP字符串 -``` - -更多举例: - -```python -import regex - -schema Resource: - cpu: str = "1" - memory: str = "1024Mi" - disk: str = "10Gi" - check: - regex.match(cpu, r"^([+-]?[0-9.]+)([m]*[-+]?[0-9]*)$"), "cpu must match specific regular expression" - regex.match(memory, r"^([1-9][0-9]{0,63})(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$"), "memory must match specific regular expression" - regex.match(disk, r"^([1-9][0-9]{0,63})(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$"), "disk must match specific regular expression" -``` - -```python -import regex - -schema Env: - name: str - value?: str - check: - len(name) <= 63, "a valid env name must be no more than 63 characters" - regex.match(name, r"[A-Za-z_][A-Za-z0-9_]*"), "a valid env name must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" -``` - -## 16. KCL 当中的 schema 是什么含义? - -schema 是 KCL 中一种语言元素,用于定义配置数据的类型,像 C 语言中的 struct 或者 Java 中的 class 一样,在其中可以定义属性,每种属性具有相应的类型。 - -## 17. 如何声明 schema? 
- -KCL 中使用 schema 关键字可以定义一个结构,在其中可以申明 schema 的各个属性 - -```python -# 一个Person结构,其中具有属性字符串类型的firstName, 字符串类型的lastName, 整数类型的age -schema Person: - firstName: str - lastName: str - # age属性的默认值为0 - age: int = 0 -``` - -一个复杂例子: - -```python -schema Deployment: - name: str - cpu: int - memory: int - image: str - service: str - replica: int - command: [str] - labels: {str:str} -``` - -在上面的代码中,`cpu` 和 `memory` 被定义为整数 int 类型;`name`,`image` 和 `service` 是字符串 str 类型; `command` 是字符串类型的列表; labels 是字典类型,其键类型和值类型均为字符串。 - -## 18. 如何为 schema 属性添加 "不可变"、"必选" 约束? - -KCL 中使用 `?` 运算符定义一个 schema 的"可选"约束,schema 属性默认都是"必选"的 - -```python -# 一个Person结构,其中具有属性字符串类型的firstName, 字符串类型的lastName, 整数类型的age -schema Person: - firstName?: str # firstName是一个可选属性,可以赋值为None/Undefined - lastName?: str # age是一个可选属性,可以赋值为None/Undefined - # age属性的默认值为0 - age: int = 18 # age是一个必选属性,不能赋值为None/Undefined,并且是一个不可变属性 - age = 10 # Error, age是一个不可变的属性 -``` - -## 19. 如何为 schema 中的属性编写校验规则? - -在 schema 定义当中可以使用 check 关键字编写 schema 属性的校验规则, 如下所示,check 代码块中的每一行都对应一个条件表达式,当满足条件时校验成功,当不满足条件时校验失败, 条件表达式后可跟 `, "check error message"` 表示当校验失败时需要显示的信息 - -```python -import regex - -schema Sample: - foo: str # Required, 不能为None/Undefined, 且类型必须为str - bar: int # Required, 不能为None/Undefined, 且类型必须为int - fooList: [int] # Required, 不能为None/Undefined, 且类型必须为int列表 - color: "Red" | "Yellow" | "Blue" # Required, 字面值联合类型,且必须为"Red", "Yellow", "Blue"中的一个,枚举作用 - id?: int # Optional,可以留空,类型必须为int - - check: - bar >= 0 # bar必须大于等于0 - bar < 100 # bar必须小于100 - len(fooList) > 0 # fooList不能为None/Undefined,并且长度必须大于0 - len(fooList) < 100 # fooList不能为None/Undefined,并且长度必须小于100 - regex.match(foo, "^The.*Foo$") # regex 正则表达式匹配 - bar in range(100) # range, bar范围只能为1到99 - bar in [2, 4, 6, 8] # enum, bar只能取2, 4, 6, 8 - bar % 2 == 0 # bar必须为2的倍数 - all foo in fooList { - foo > 1 - } # fooList中的所有元素必须大于1 - any foo in fooList { - foo > 10 - } # fooList中至少有一个元素必须大于10 - abs(id) > 10 if id # check if 表达式,当 id 不为空时,id的绝对值必须大于10 -``` - -此外,上述 check 当中比较表达式还可以简写为: - -```python -0 <= bar < 100 -0 < len(fooList) < 100 -``` - -综上所述,KCL Schema 中支持的校验类型为: - -| 校验类型 | 使用方法 | -| -------- | ------------------------------------------------------- | -| 范围校验 | 使用 `<`, `>` 等比较运算符 | -| 正则校验 | 使用 `regex` 系统库中的 `match` 等方法 | -| 长度校验 | 使用 `len` 内置函数,可以求 `list/dict/str` 类型的变量长度 | -| 枚举校验 | 使用字面值联合类型 | -| 非空校验 | 使用 schema 的可选/必选属性 | -| 条件校验 | 使用 check if 条件表达式 | - -## 20. 如何为 schema 及其属性添加文档注释? - -一个完整的 schema 属性注释使用三引号字符串表示,其中的结构如下所示: - -```python -schema Person: - """The schema person definition - - Attributes - ---------- - name : str - The name of the person - age : int - The age of the person - - See Also - -------- - Son: - Sub-schema Son of the schema Person. - - Examples - -------- - person = Person { - name = "Alice" - age = 18 - } - """ - name: str - age: int - -person = Person { - name = "Alice" - age = 18 -} -``` - -## 21. 如何基于 schema 编写配置?多个配置之间如何复用公共的配置? - -在 schema 实例化的过程中可以使用解包运算符 `**` 对公共的配置进行展开 - -```python -schema Boy: - name: str - age: int - hc: int - -schema Girl: - name: str - age: int - hc: int - -config = { - age = 18 - hc = 10 -} - -boy = Boy { - **config - name = "Bob" -} -girl = Girl { - **config - name = "Alice" -} -``` - -输出 YAML 为: - -```yaml -config: - age: 18 - hc: 10 -boy: - name: Bob - age: 18 - hc: 10 -girl: - name: Alice - age: 18 - hc: 10 -``` - -## 22. 基于 schema 编写配置时如何覆盖 schema 属性的默认值? 
- -在定义 schema 后,可以使用 schema 名称实例化相应的配置,使用 `:` 运算符对 schema 默认值进行 union, 使用 `=` 对 schema 默认值进行覆盖。对于 int/float/bool/str 类型的 schema 属性,union 和覆盖的效果相同; 对于 list/dict/schema 类型的 schema 属性,union 和覆盖的效果不同; - -```python -schema Meta: - labels: {str:str} = {"key1" = "value1"} - annotations: {str:str} = {"key1" = "value1"} - -meta = Meta { - labels: {"key2": "value2"} - annotations = {"key2" = "value2"} -} -``` - -输出 YAML 为: - -```yaml -meta: - labels: - key1: value1 - key2: value2 - annotations: - key2: value2 -``` - -## 23. 如何通过继承来复用 schema 定义? - -可以在 schema 定义处声明 schema 需要继承的 schema 名称: - -```python -# A person has a first name, a last name and an age. -schema Person: - firstName: str - lastName: str - # The default value of age is 0 - age: int = 0 - -# An employee **is** a person, and has some additional information. -schema Employee(Person): - bankCard: int - nationality: str - -employee = Employee { - firstName = "Bob" - lastName = "Green" - age = 18 - bankCard = 123456 - nationality = "China" -} -``` - -输出 YAML 为: - -```yaml -employee: - firstName: Bob - lastName: Green - age: 18 - bankCard: 123456 - nationality: China -``` - -注意: KCL 只允许 schema 单继承 - -## 24. 如何通过组合复用 schema 逻辑? - -可以使用 KCL schema mixin 复用 schema 逻辑,mixin 一般被用于 schema 内部属性的分离数据,和数据映射等功能,可以使 KCL 代码更具模块化和声明性。注意不同的 mixin 之间的混入属性不建议定义依赖关系,会使得 mixin 使用方式复杂,一般一个 mixin 中作不超过三个属性混入即可。 - -```python -schema Person: - mixin [FullNameMixin, UpperMixin] - - firstName: str - lastName: str - fullName: str - upper: str - -schema FullNameMixin: - fullName = "{} {}".format(firstName, lastName) - -schema UpperMixin: - upper = fullName.upper() - -person = Person { - firstName = "John" - lastName = "Doe" -} -``` - -输出 YAML 为: - -```yaml -person: - firstName: John - lastName: Doe - fullName: John Doe - upper: JOHN DOE -``` - -## 25. 如何导入其他 KCL 文件? - -通过 import 关键字可以导入其他 KCL 文件,KCL 配置文件被组织为模块。单个 KCL 文件被视为一个模块,目录被视为一个包,作为一个特殊的模块。import 关键字支持相对路径导入和绝对路径导入两种方式 - -比如对于如下目录结构: - -``` -. -└── root - ├── kcl.mod - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ │── service1.k - │ └── service2.k - └── mixin - └── mixin1.k -``` - -对于 `main.k`, 相对路径导入和绝对路径导入分别可以表示为: - -```python -import service # 绝对路径导入, 根目录为kcl.mod所在的路径 -import mixin # 绝对路径导入, 根目录为kcl.mod所在的路径 - -import .model1 # 相对路径导入, 当前目录模块 -import ..service # 相对路径导入, 父目录 -import ...root # 相对路径导入, 父目录的父目录 -``` - -注意,对于 KCL 的入口文件 `main.k`, 其不能导入自身所在的文件夹,否则会发生循环导入错误: - -```python -import model # Error: recursively loading -``` - -## 26. 什么情况下可以省略 import ? - -除了 main 包当中的同一文件夹下的 KCL 可以相互引用而不需通过 import 相互引用,比如对于如下目录结构: - -``` -. -└── root - ├── kcl.mod - ├── model - │ ├── model1.k - | ├── model2.k - │ └── main.k - ├── service - │ │── service1.k - │ └── service2.k - └── mixin - └── mixin1.k -``` - -当 main.k 作为 KCL 命令行入口文件时, model 文件夹中的 main.k, model1.k 和 model2.k 中的变量不能相互引用,需要通过 import 导入,但是 service 文件夹中的 service1.k 和 service2.k 当中的变量可以互相引用,忽略 import - -service1.k - -```python -schema BaseService: - name: str - namespace: str -``` - -service2.k - -```python -schema Service(BaseService): - id: str -``` - -## 27. 有一行代码太长了,如何在语法正确的情况下优雅地换行? 
- -在 KCL 中可以使用续行符 `\` 进行换行, 并且在字符串中也可以使用 `\` 表示续行 - -长字符串连接续行举例: - -```python -longString = "Too long expression " + \ - "Too long expression " + \ - "Too long expression " -``` - -推导表达式续行举例: - -```python -data = [1, 2, 3, 4] -dataNew = [ - d + 2 \ - for d in data \ - if d % 2 == 0 -] -``` - -if 表达式续行举例: - -```python -condition = 1 -data1 = 1 \ - if condition \ - else 2 -data2 = 2 \ -if condition \ -else 1 -``` - -三引号字符串内部续行举例: - -```python -longString = """\ -The first line\ -The continue second line\ -""" -``` - -注意: 使用续行符 `\` 的同时缩进的保持, 如下所示: - -错误用例: - -```python -data1 = [ - 1, 2, - 3, 4 \ -] # Error, 需要保持右方括号]的缩进 - -data2 = [ - 1, 2, - 3, 4 -] # Error, 需要数字1和3的缩进统一 -``` - -正确用例: - -```python -data1 = [ - 1, 2, - 3, 4 -] # Right, 带缩进的列表定义 - -data2 = [ \ - 1, 2, \ - 3, 4 \ -] # Right, 使用续行符的列表定义, 实际效果是单行列表 - -data3 = [ \ - 1, 2, \ - 3, 4 \ -] # Right, 使用续行符的列表定义, 无需保持缩进, 实际效果是单行列表 -``` - -## 28. **, * 这些符号是什么意思? - -- `**`, `*` 出现在 dict/list 外部时分别表示乘方运算符和乘法运算符 - -```python -data1 = 2 ** 4 # 2的4次方等于16 -data2 = 2 * 3 # 2乘以3等于6 -``` - -- `**`, `*` 出现在 dict/list 内部时表示解包运算符,经常用于 list/dict 的解包和合并, 与 Python 当中的解包运算符用法相同 - -dict 的解包: - -```python -data = {"key1" = "value1"} -dataUnpack = {**data, "key2" = "value2"} # 将data解包合并入dataUnpack中, {"key1": "value1", "key2": "value2"} -``` - -list 的解包: - -```python -data = [1, 2, 3] -dataUnpack = [*data, 4, 5, 6] # 将data解包合并入dataUnpack中, [1, 2, 3, 4, 5, 6] -``` - -## 29. 如何取 list/dict/schema 的子元素 - -在 KCL 中可以使用 select 表达式或者 subscript 表达式取 list/dict/schema 的子元素 - -- 对于 list 类型,可以使用 `[]` 取列表中的某一个元素或者某一些元素 - -```python -data = [1, 2, 3] # 定义一个整数类型的数组 -theFirstItem = data[0] # 取数组中索引为0的元素,即第一个元素 1 -theSecondItem = data[1] # 取数组中索引为1的元素,即第一个元素 2 -``` - -注意:索引的取值不能超出列表的长度,否则会发生错误,可以使用 `len` 函数获得数组的长度 - -```python -data = [1, 2, 3] -dataLength = len(data) # 数组长度为3 -item = data[3] # 发生数组索引越界错误 -``` - -此外,还可以使用负数索引倒序获得列表中的元素 - -```python -data = [1, 2, 3] -item1 = data[-1] # 取数组中索引为-1的元素,即最后一个元素 3 -item2 = data[-2] # 取数组中索引为-2的元素,即倒数第二个元素 2 -``` - -综上,列表索引的取值范围为 `[-len, len - 1]` - -当想要取得列表的一部分时,可以在 `[]` 中使用切片表达式,其具体语法为 `[<列表开始索引>:<列表终止索引>:<列表遍历步长>]`,注意索引开始终止的取值区间为 `左闭右开[<列表开始索引>, <列表终止索引>)`,注意三个参数均可省略不写 - -```python -data = [1, 2, 3, 4, 5] -dataSlice0 = data[1:2] # 取列表中索引开始为 1, 终止索引为 2 的元素集合 [2] -dataSlice1 = data[1:3] # 取列表中索引开始为 1, 终止索引为 3 的元素集合 [2, 3] -dataSlice2 = data[1:] # 取列表中索引开始为 1, 终止索引为 最后一个索引 的元素集合 [2, 3, 4, 5] -dataSlice3 = data[:3] # 取列表中索引开始为 第一个索引, 终止索引为 3 的元素集合 [1, 2, 3] -dataSlice4 = data[::2] # 取列表中索引开始为 第一个索引, 终止索引为 最后一个索引 的元素集合(步长为2) [1, 3, 5] -dataSlice5 = data[::-1] # 反转一个列表,[5, 4, 3, 2, 1] -dataSlice6 = data[2:1] # 当开始,终止,步长三个参数组合不满足条件时返回空列表 [] - -``` - -- 对于 dict/schema 类型,可以使用 `[]` 和 `.` 两种方式取 dict/schema 中的子元素 - -```python -data = {key1: "value1", key2: "value2"} -data1 = data["key1"] # "value1" -data2 = data.key1 # "value1" -data3 = data["key2"] # "value2" -data4 = data.key2 # "value2" -``` - -```python -schema Person: - name: str = "Alice" - age: int = 18 - -person = Person {} -name1 = person.name # "Alice" -name2 = person["name"] # "Alice" -age1 = person.age # 18 -age2 = person.age # 18 -``` - -当键值在 dict 中不存在时,返回未定义值 `Undefined` - -```python -data = {key1 = "value1", key2 = "value2"} -data1 = data["not_exist_key"] # Undefined -data2 = data.not_exist_key # Undefined -``` - -可以使用 `in` 关键字判断某个键值是否在 dict/schema 中存在 - -```python -data = {key1 = "value1", key2 = "value2"} -exist1 = "key1" in data # True -exist2 = "not_exist_key" in data # False -``` - -当键值中存在 `.` 时或者需要运行时取一个键值变量对应的值时,只能使用 `[]` 方式,如无特殊情况,使用 `.` 即可: - -```python -name = 
"key1" -data = {key1 = "value1", key2 = "value2", "contains.dot" = "value3"} -data1 = data[name] # "value1" -data2 = data["contains.dot"] # "value3" -# 注意这样子是不对的 data3 = data.contains.dot -``` - -注意:上述取子元素的运算符不能对非 list/dict/schema 集合类型的值进行操作,比如整数,空值等。 - -```python -data = 1 -data1 = 1[0] # error -``` - -```python -data = None -data1 = None[0] # error -``` - -在取集合类型的子元素时往往要进行非空或者长度判断: - -```python -data = [] -item = data[0] if data else None -``` - -可以使用非空判断符 `?` 添加在 `[]`, `.` 的前面表示进行 if 非空判断,当不满足条件时返回 None,比如上述代码可以简化为: - -```python -data = [] -item1 = data?[0] # 当data为空时,返回空值 None -item2 = data?[0] or 1 # 当data为空时,返回空值 None, 如果不想返回 None, 还可与 or 运算符连用返回其他默认值 -``` - -使用 `?` 可以进行递归调用, 避免复杂繁琐的非空判断 - -```python -data = {key1.key2.key3 = []} -item = data?.key1?.key2?.key3?[0] -``` - -## 30. 如何在 KCL 代码中判断变量的类型 - -KCL typeof built-in 函数可以在该函数执行时立即返回一个变量的类型(字符串表示)用于类型断言 - -用法举例: - -```python -import sub as pkg - -_a = 1 - -t1 = typeof(_a) -t2 = typeof("abc") - -schema Person: - name?: any - -_x1 = Person{} -t3 = typeof(_x1) - -_x2 = pkg.Person{} -t4 = typeof(_x2) -t5 = typeof(_x2, full_name=True) - -t6 = typeof(_x1, full_name=True) - -# 输出 -# t1: int -# t2: str -# t3: Person -# t4: Person -# t5: sub.Person -# t6: __main__.Person -``` - -## 31. 关键字和 KCL 变量名冲突了可以怎么解决? - -对于与关键字冲突的标识符,可以在标识符前添加 `$` 前缀用于定义一个关键字标识符,比如如下代码中使用了 `if`, `else` 等关键字作为标识符并且可以得到相应的 YAML 输出 - -```python -$if = 1 -$else = "s" - -schema Data: - $filter: str = "filter" - -data = Data {} -``` - -输出 YAML: - -```yaml -data: - filter: filter -if: 1 -else: s -``` - -注意:在非关键字标识符前添加 `$` 前缀的效果与不添加相同 - -```python -_a = 1 -$_a = 2 # 等效于 `_a = 2` -``` - -## 32. KCL 的内置类型是 KCL 的关键字吗?是否可用于变量的定义 - -KCL 的内置类型包括 `int`, `float`, `bool` 和 `str` 四种类型,它们不是 KCL 的关键字,可用于变量的定义,比如如下代码: - -```py -int = 1 -str = 2 -``` - -输出 YAML 为: - -```yaml -int: 1 -str: 2 -``` - -注意:如无特殊需求,不建议变量的名称取这些内置类型,因为在有些语言当中,它们作为关键字存在 - -## 33. 如何在 KCL 中实现类似 Enum 枚举的功能 - -有两种方式可以在 KCL 中实现 Enum 枚举的方式 - -- (推荐)使用**字面值类型**的**联合类型** - -```python -schema Person: - name: str - gender: "Male" | "Female" - -person = Person { - name = "Alice" - gender = "Male" # gender 只能为 "Male" 或者 "Female" -} -``` - -一个复杂例子 - -```python -schema Config: - colors: ["Red" | "Yellow" | "Blue"] # colors 是一个枚举数组 - -config = Config { - colors = [ - "Red" - "Blue" - ] -} -``` - -- 使用 schema 的 check 表达式 - -```python -schema Person: - name: str - gender: "Male" | "Female" - - check: - gender in ["Male", "Female"] - -person = Person { - name = "Alice" - gender = "Male" # gender 只能为 "Male" 或者 "Female" -} -``` - -## 34. 如何求字典 dict 的长度 - -在 KCL 中可以使用 `len` 内置函数直接求 dict 的长度 - -```python -len1 = len({k1: "v1"}) # 1 -len2 = len({k1: "v1", k2: "v2"}) # 2 -varDict = {k1 = 1, k2 = 2, k3 = 3} -len3 = len(varDict) # 3 -``` - -此外,使用 `len` 函数还可以求 `str` 和 `list` 类型长度 - -```python -len1 = len("hello") # 5 -len2 = len([1, 2, 3]) # 3 -``` - -## 35. 如何在 KCL 中编写带条件的配置 - -在 KCL 中,除了支持在顶级的语句中书写 `if-elif-else` 条件表达式以外,还支持在 KCL 复杂结构(list/dict/schema)中书写条件表达式,支持带条件的配置书写。 - -```python -x = 1 -# List 结构中的 if 条件语句 -dataList = [ - if x == 1: 1 -] -# Dict 结构中的 if 条件语句 -dataDict = { - if x == 1: key1 = "value1" # 可以同一行书写 - elif x == 2: - key2 = "value2" # 可以跨行书写 -} -# Schema 结构中的 if 条件语句 -schema Config: - id?: int -env = "prod" -dataSchema = Config { - if env == "prod": - id = 1 - elif env == "pre": - id = 2 - elif env == "test": - id = 3 -} -``` - -## 36. KCL 中的 == 运算符会作深度比较嘛? 
-
-KCL 中的 `==` 运算符
-
-- 对于基本类型 `int`, `float`, `bool`, `str` 的变量是直接比较它们的值是否相等
-- 对于复合类型 `list`, `dict`, `schema` 的变量会深度递归地比较其中的子元素是否相等
-  - `list` 类型深度递归比较每个索引的值以及长度
-  - `dict`/`schema` 类型深度递归比较每个属性的值(与属性出现的顺序无关)
-
-```python
-print([1, 2] == [1, 2]) # True
-print([[0, 1], 1] == [[0, 1], 1]) # True
-print({k1 = 1, k2 = 2} == {k2 = 2, k1 = 1}) # True
-
-print([1, 2] == [1, 2, 3]) # False
-print({k1 = 1, k2 = 2, k3 = 3} == {k2 = 2, k1 = 1}) # False
-```
-
-## 37. 如何对 KCL 中已有的配置块进行修改
-
-在 KCL 中,存在三种**属性运算符** `=`、`+=`、`:`,可以用来对已有配置块进行修改,并且可以使用**解包运算符** `**` 等"继承"一个配置块的所有属性字段和值。
-
-- `=` 属性运算符表示覆盖,使用 `=` 运算符可以对属性进行有优先级的覆盖/删除(如果是用 `Undefined` 覆盖则表示删除)
-- `+=` 属性运算符表示添加,一般用于对 list 类型的属性添加子元素,`+=` 属性运算符后跟的操作数类型也只能为 list 类型
-- `:` 属性运算符表示幂等合并,当值发生冲突时进行报错,不冲突时进行合并
-
-### 覆盖属性运算符=
-
-最常使用的属性运算符是 `=`,表示一个属性的赋值,多次对同一个属性进行使用时表示覆盖,对于 `{}` 外的全局变量或者 `{}` 内的属性均表示使用值覆盖这个全局变量或者属性
-
-```python
-data = { # 定义一个字典类型的变量 data
-    a = 1 # 使用 = 在 data 中声明一个值为 1 的属性 a
-    b = 2 # 使用 = 在 data 中声明一个值为 2 的属性 b
-} # 最终 data 的值为 {"a": 1, "b": 2}
-```
-
-在 schema 实例化处也可以使用覆盖属性运算符实现对 schema 默认值的覆盖效果,在创建新的 schema 实例时如无特殊需求,一般使用 `=` 即可
-
-```python
-schema Person:
-    name: str = "Alice" # schema Person 的 name 属性具有默认值 "Alice"
-    age: int = 18 # schema Person 的 age 属性具有默认值 18
-
-bob = Person {
-    name = "Bob" # "Bob" -> "Alice", 属性 name 的值 "Bob" 会覆盖 schema Person name 属性的默认值 "Alice"
-    age = 10 # 10 -> 18, 属性 age 的值 10 会覆盖 schema Person age 属性的默认值 18
-} # 最终 bob 的值为 {"name": "Bob", "age": 10}
-```
-
-### 插入属性运算符 +=
-
-插入属性运算符表示对一个属性的值进行原地添加,比如向一个 list 类型的属性添加新的元素
-
-```python
-data = {
-    args = ["kcl"] # 使用 = 在 data 中声明一个值为 ["kcl"] 的属性 args
-    args += ["-Y", "settings.yaml"] # 使用 += 运算符向属性 args 中添加两个元素 "-Y", "settings.yaml"
-} # 最终 data 的值为 {"args": ["kcl", "-Y", "settings.yaml"]}
-```
-
-### 合并属性运算符:
-
-合并属性运算符表示对一个属性的不同配置块值进行幂等的合并,当需要合并的值发生冲突时进行报错,多用于复杂配置合并场景
-
-```python
-data = {
-    labels: {key1: "value1"} # 定义一个 labels, 它的类型为 dict, 值为 {"key1": "value1"}
-    labels: {key2: "value2"} # 使用 : 将 labels 不同的配置值进行合并
-} # 最终 data 的值为 {"labels": {"key1": "value1", "key2": "value2"}}
-```
-
-合并属性运算符属于幂等运算符,需要合并的配置块的书写顺序不影响其最终结果,比如上述例子中的两个 `labels` 属性也可以调换顺序书写
-
-```python
-data = { # 同一个属性 labels 的合并书写顺序不影响最终结果
-    labels: {key2: "value2"} # 定义一个 labels, 它的类型为 dict, 值为 {"key2": "value2"}
-    labels: {key1: "value1"} # 使用 : 将 labels 不同的配置值进行合并
-} # 最终 data 的值为 {"labels": {"key1": "value1", "key2": "value2"}}
-```
-
-注意:合并属性运算符会对合并的值进行冲突检查,当需要合并的配置值发生冲突时进行报错
-
-```python
-data = {
-    a: 1 # a 的值为 1
-    a: 2 # Error: a 的值 2 不能与 a 的值 1 进行合并,因为其结果存在冲突,且合并是不可交换的
-}
-```
-
-```python
-data = {
-    labels: {key: "value"}
-    labels: {key: "override_value"} # Error: 两个 labels 的 key 属性的值 "value" 和 "override_value" 是冲突的,不可合并
-}
-```
-
-合并运算符对不同类型的使用方式不同
-
-- 不同类型的属性不能进行合并
-- 当属性为 int/float/str/bool 等基本类型时,运算符会判断需要合并的值是否相等,不相等时发生合并冲突错误
-
-```python
-data = {
-    a: 1
-    a: 1 # Ok
-    a: 2 # Error
-}
-```
-
-- 当属性为 list 类型时
-  - 当需要合并的两个 list 长度不相等时,发生合并冲突错误
-  - 当需要合并的两个 list 长度相等时,按照索引递归地合并 list 当中的每一个元素
-
-```python
-data = {
-    args: ["kcl"]
-    args: ["-Y", "settings.yaml"] # Error: 两个 args 属性的长度不相同,不能进行合并
-    env: [{key1: "value1"}]
-    env: [{key2: "value2"}] # Ok: 按索引合并,最终 env 属性的值为 [{"key1": "value1", "key2": "value2"}]
-}
-```
-
-- 当属性为 dict/schema 类型时,按照 key 递归地合并 dict/schema 当中的每一个元素
-
-```python
-data = {
-    labels: {key1: "value1"}
-    labels: {key2: "value2"}
-    labels: {key3: "value3"}
-} # 最终 data 的值为 {"labels": {"key1": "value1", "key2": "value2", "key3": "value3"}}
-```
-
-- 任意类型的属性与 None/Undefined 合并的结果都是其自身
-
-```python
-data = {
-    args: ["kcl"]
-    args: None # Ok
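-    # 提示:与 None/Undefined 合并不会改变已有值,args 仍保持 ["kcl"]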
- args: Undefined #Ok -} # 最终 data 的值为 {"args": ["kcl"]} -``` - -支持顶级变量使用 `:` 属性声明与合并(仍然可使用 `config = Config {}` 的方式声明一个配置块) - -```python -schema Config: - id: int - value: str - -config: Config { - id: 1 -} -config: Config { - value: "1" -} -""" -此处定义了两个 Config 配置块,使用 : 运算符将可以两个配置块合并在一起,其合并的等效代码如下: -config: Config { - id: 1 - value: "1" -} -""" -``` - -综上所述,合并属性运算符 `:` 的使用场景主要为复杂数据结构 list/dict/schema 的合并操作,一般情况如无特殊需求使用 `=` 和 `+=` 两种属性运算符即可,因此属性运算符的最佳实践如下 - -- 对于基本类型,采用 `=` 运算符 -- 对于 list 类型,一般采用 `=` 和 `+=` 运算符,使用 `=` 表示完全覆盖 list 属性,使用 `+=` 表示向 list 中添加元素 -- 对于 dict/schema 类型,一般采用 `:` 运算符 - -此外,当已经存在一个配置时,可以使用解包运算符 `**` 获得此配置的所有字段值并对其中的字段使用不同属性运算符进行修改,并获得一个新的配置 - -```python -configBase = { - intKey = 1 # 一个 int 类型的属性 - floatKey = 1.0 # 一个 float 类型的属性 - listKey = [0] # 一个 list 类型的属性 - dictKey = {key1: "value1"} # 一个 dict 类型的属性 -} -configNew = { - **configBase # 将 configBase 解包内联到 configNew 中 - intKey = 0 # 使用 覆盖属性运算符 = 将 intKey 属性覆盖为 1 - floatKey = Undefined # 使用 覆盖属性运算符 = 删除 floatKey 属性 - listKey += [1] # 使用 添加属性运算符 += 为 listKey 属性尾部添加一个属性 1 - dictKey: {key2: "value2"} # 使用 合并属性运算符 : 为 dictKey 属性扩展一个键-值对 -} -``` - -输出的 YAML 结果为: - -```yaml -configBase: - intKey: 1 - floatKey: 1.0 - listKey: - - 0 - dictKey: - key1: value1 -configNew: - intKey: 0 - listKey: - - 0 - - 1 - dictKey: - key1: value1 - key2: value2 -``` - -或者可以使用 `|` 运算符对两个配置块合并: - -```python -configBase = { - intKey = 1 # 一个 int 类型的属性 - floatKey = 1.0 # 一个 float 类型的属性 - listKey = [0] # 一个 list 类型的属性 - dictKey = {key1: "value1"} # 一个 dict 类型的属性 -} -configNew = configBase | { # 使用 | 进行合并 - intKey = 0 # 使用 覆盖属性运算符 = 将 intKey 属性覆盖为 1 - floatKey = Undefined # 使用 覆盖属性运算符 = 删除 floatKey 属性 - listKey += [1] # 使用 添加属性运算符 += 为 listKey 属性尾部添加一个属性 1 - dictKey: {key2: "value2"} # 使用 合并属性运算符 : 为 dictKey 属性扩展一个键-值对 -} -``` - -输出的 YAML 结果为: - -```yaml -configBase: - intKey: 1 - floatKey: 1.0 - listKey: - - 0 - dictKey: - key1: value1 -configNew: - intKey: 0 - listKey: - - 0 - - 1 - dictKey: - key1: value1 - key2: value2 -``` - -### KCL 发生 conflicting values on the attribute 'attr' between {value1} and {value2} 错误的解决方式 - -当 KCL 发生类似 conflicting values on the attribute 'attr' between {value1} and {value2} 错误时,一般是合并属性运算符 `:` 的使用问题,表明 `value1` 和 `value2` 配置进行合并时在属性 `attr` 处发生了冲突错误。一般情况将 value2 的 attr 属性修改为其他属性运算符即可,使用 `=` 表示覆盖,使用 `+=` 表示添加 - -比如对于如下代码: - -```python -data = {k: 1} | {k: 2} # Error: conflicting values on the attribute 'k' between {'k': 1} and {'k': 2} -``` - -则可以使用 `=` 属性运算符修改为如下形式 - -```python -data = {k: 1} | {k = 2} # Ok: the value 2 will override the value 1 through the `=` operator -``` - -## 38. KCL 中如何同时遍历多个元素 - -KCL 中可以使用 for 推导表达式遍历多个元素 - -- 举例 1: 使用 for 进行 2 维元素遍历 - -```python -dimension1 = [1, 2, 3] # dimension1 列表的长度是 3 -dimension2 = [1, 2, 3] # dimension2 列表的长度是 3 -matrix = [x + y for x in dimension1 for y in dimension2] # matrix 列表的长度是 9 = 3 * 3 -``` - -输出结果如下: - -```yaml -dimension1: -- 1 -- 2 -- 3 -dimension2: -- 1 -- 2 -- 3 -matrix: -- 2 -- 3 -- 4 -- 3 -- 4 -- 5 -- 4 -- 5 -- 6 -``` - -- 举例 2: 使用 for 循环配合 zip 内置函数按照索引一一对应对多个列表进行遍历 - -```python -dimension1 = [1, 2, 3] # dimension1 列表的长度是 3 -dimension2 = [1, 2, 3] # dimension2 列表的长度是 3 -dimension3 = [d[0] + d[1] for d in zip(dimension1, dimension2)] # dimension3 列表的长度是 3 -``` - -输出结果如下: - -```yaml -dimension1: -- 1 -- 2 -- 3 -dimension2: -- 1 -- 2 -- 3 -dimension3: -- 2 -- 4 -- 6 -``` - -## 39. 
KCL 中如何为 option 函数设定默认值 - -在 KCL 中,当 option 属性的值为 None/Undefined 空时,可以使用逻辑或 `or` 直接指定一个默认值 - -```python -value = option("key") or "default_value" # 当 key 的值存在时,取 option("key") 的值,否则取 "default_value" -``` - -或者使用 option 函数的 default 参数 - -```python -value = option("key", default="default_value") # 当 key 的值存在时,取 option("key") 的值,否则取 "default_value" -``` - -## 40. KCL 中 schema 怎么检查多个属性不能同时为空 - -在 KCL 中,对于 schema 的单个属性不能为空可以使用属性非空标记 - -```python -schema Person: - name: str # required. name 不能为空 - age: int # required. age 不能为空 - id?: int # optional. id 可以留空 -``` - -而对于需要检查 schema 属性不能同时为空或者只能有一者为空的情况时,需要借助 schema check 表达式进行书写,下面以同一个 schema Config 的两个属性 a, b 为例进行说明 - -- Config 的 a, b 属性不能同时为空 - -```python -schema Config: - a?: str - b?: str - - check: - a or b, "a属性和b属性不能同时为空" -``` - -- Config 的 a, b 属性只能有一个为空或者都为空(不能同时存在或不为空) - -```python -schema Config: - a?: str - b?: str - - check: - not a or not b, "a属性和b属性不能同时填写" -``` - -## 41. KCL 中 import 了某个文件但是找不到其同目录下其他 KCL 文件定义的 schema 可能是什么原因 - -可能是与使用 import 仅导入了这个文件夹的这一个文件导致,在 KCL 中,import 支持导入整个文件夹,也支持导入某一个文件夹下的的某一个 KCL 文件,比如对于如下目录结构 - -``` -. -├── kcl.mod -├── main.k -└── pkg - ├── pkg1.k - ├── pkg2.k - └── pkg3.k -``` - -在根目录下存在入口文件 main.k,可以在 main.k 中书写如下代码导入整个 pkg 文件夹,此时 pkg 文件夹下的所有 schema 定义互相可见 - -```python -import pkg -``` - -还可以书写如下代码导入单个文件 pkg/pkg1.k,此时 pkg1.k 不能找到其他文件即 pkg2.k/pkg3.k 下的 schema 定义 - -```python -import pkg.pkg1 -``` - -## 42. KCL 中的缩进是如何处理的? - -在 KCL 中,在出现冒号 `:`、中括号对 `[]` 以及大括号对 `{}` 时,一般需要使用换行 + 缩进,同一缩进级的缩进空格数需要保持一致,一个缩进级一般采用 4 个空格表示 - -- 冒号 `:` 后跟换行 + 缩进 - -```python -"""if 语句中的缩进""" -_a = 1 -_b = 1 -if _a >= 1: # 冒号后跟换行+缩进 - if _a > 8: - _b = 2 - elif a > 6: - _b = 3 - -"""schema 定义中的缩进""" -schema Person: # 冒号后跟换行+缩进 - name: str - age: int -``` - -- 中括号对 `[]` 后跟换行 + 缩进 - -```python -data = [ # 左中括号 [ 后跟换行+缩进 - 1 - 2 - 3 -] # 右中括号 ] 前取消缩进 -``` - -```python -data = [ # 左中括号 [ 后跟换行+缩进 - i * 2 for i in range(5) -] # 右中括号 ] 前取消缩进 -``` - -- 大括号对 `{}` 后跟换行 + 缩进 - -```python -data = { # 左大括号 { 后跟换行+缩进 - k1 = "v1" - k2 = "v2" -} # 右大括号 } 前取消缩进 -``` - -```python -data = { # 左大括号 { 后跟换行+缩进 - str(i): i * 2 for i in range(5) -} # 右大括号 } 前取消缩进 -``` - -## 43. 如何为 KCL 代码编写简单的测试? - -KCL 目前的版本还不支持内部程序调试,可以使用 assert 语句以及 print 函数实现数据的断言和打印查看 - -```python -a = 1 -print("The value of a is", a) -assert a == 1 -``` - -此外,还可以借助 kcl-test 测试工具编写 KCL 内部编写测试用例 - -假设有 hello.k 文件,代码如下: - -```python -schema Person: - name: str = "kcl" - age: int = 1 - -hello = Person { - name = "hello kcl" - age = 102 -} -``` - -构造 hello_test.k 测试文件,内容如下: - -```python -schema TestPerson: - a = Person{} - assert a.name == 'kcl' - -schema TestPerson_age: - a = Person{} - assert a.age == 1 - -schema TestPerson_ok: - a = Person{} - assert a.name == "kcl" - assert a.age == 1 -``` - -然后在目录下执行 kcl-test 命令: - -``` -$ kcl-test -ok /pkg/to/app [365.154142ms] -$ -``` - -## 44. KCL 中如何定义函数或定义方法? 
- -schema 结构在一定程度上充当了函数的功能,并且这个函数具有多个输入参数和多个输出参数的能力,比如如下代码可以实现一个斐波那契数列的功能: - -```python -schema Fib: - n: int - value: int = 1 if n <= 2 else (Fib {n: n - 1}).value + (Fib {n: n - 2}).value - -fib8 = (Fib {n: 8}).value -``` - -输出结果为: - -```yaml -fib8: 21 -``` - -一个合并列表为字典的 schema 函数 - -```python -schema UnionAll[data, n]: - _?: [] = data - value?: {:} = ((UnionAll(data=data, n=n - 1) {}).value | data[n] if n > 0 else data[0]) if data else {} - -schema MergeList[data]: - """Union一个列表中的所有元素返回合并字典 - - [{"key1": "value1"}, {"key2": "value2"}, {"key3": "value3"}] -> {"key1": "value1", "key2": "value2", "key3": "value3"} - """ - _?: [] = data - value?: {:} = (UnionAll(data=data, n=len(data) - 1) {}).value if data else {} -``` - -此外,KCL 支持使用 `lambda` 关键字定义一个函数: - -```python -func = lambda x: int, y: int -> int { - x + y -} -a = func(1, 1) # 2 -``` - -lambda 函数具有如下特性: - -- lambda 函数将最后一个表达式的值作为函数的返回值,空函数体返回 None。 -- 返回值类型注解可以省略,返回值类型为最后一个表达式值的类型 -- 函数体中没有与顺序无关的特性,所有的表达式都是按顺序执行的 - -```python -_func = lambda x: int, y: int -> int { - x + y -} # Define a function using the lambda expression -_func = lambda x: int, y: int -> int { - x - y -} # Ok -_func = lambda x: int, y: int -> str { - str(x + y) -} # Error (int, int) -> str can't be assigned to (int, int) -> int -``` - -lambda 函数对象不能参与任何计算,只能在赋值语句和调用语句中使用。 - -```python -func = lambda x: int, y: int -> int { - x + y -} -x = func + 1 # Error: unsupported operand type(s) for +: 'function' and 'int(1)' -``` - -```python -a = 1 -func = lambda x: int { - x + a -} -funcOther = lambda f, para: int { - f(para) -} -r = funcOther(func, 1) # 2 -``` - -输出为: - -```python -a: 1 -r: 2 -``` - -可以定义一个匿名函数并直接调用 - -```python -result = (lambda x, y { - z = 2 * x - z + y -})(1, 1) # 3 -``` - -可以在 for 循环使用使用匿名函数 - -```python -result = [(lambda x, y { - x + y -})(x, y) for x in [1, 2] for y in [1, 2]] # [2, 3, 3, 4] -``` - -可以在 KCL schema 中定义并使用函数 - -```python -_funcOutOfSchema = lambda x: int, y: int { - x + y -} -schema Data: - _funcInSchema = lambda x: int, y: int { - x + y - } - id0: int = _funcOutOfSchema(1, 1) - id1: int = _funcInSchema(1, 1) - id2: int = (lambda x: int, y: int { - x + y - })(1, 1) -``` - -输出 YAML 为: - -```yaml -data: - id0: 2 - id1: 2 - id2: 2 -``` - -## 45. 为什么变量赋值为枚举类型(字面值联合类型)时会报错 - -在 KCL 中,被定义为字面值联合类型的属性,在赋值时仅允许接收一个字面值或者同为字面值联合类型的变量,比如如下代码是正确的: - -```python -schema Data: - color: "Red" | "Yellow" | "Blue" - -data = Data { - color = "Red" # Ok, 赋值为 "Red"、"Yellow" 和 "Blue" 均可 -} -``` - -然而以下代码是错误的: - -```python -schema Data: - color: "Red" | "Yellow" | "Blue" - -_color = "Red" - -data = Data { - color = _color # Error: expect str(Red)|str(Yellow)|str(Blue), got str -} -``` - -这是因为没有为变量 `_color` 申明一个类型,它会被 KCL 编译器推导为 `str` 字符串类型,因此当一个 “较大” 的类型 `str` 赋值为一个 “较小” 的类型时 `"Red" | "Yellow" | "Blue"` 会报错,一个解决方式是为 `_color` 变量声明一个类型,以下代码是正确的: - -```python -schema Data: - color: "Red" | "Yellow" | "Blue" - -_color: "Red" | "Yellow" | "Blue" = "Red" - -data = Data { - color = _color # Ok -} -``` - -进一步地,我们可以使用类型别名来简化枚举(字面值联合类型的书写),比如如下代码: - -```python -type Color = "Red" | "Yellow" | "Blue" # 定义一个类型别名,可以在不同的地方重复使用,降低代码书写量 - -schema Data: - color: Color - -_color: Color = "Red" - -data = Data { - color = _color # Ok -} -``` - -## 46. 过程式的 for 循环 - -KCL 中为何不支持过程式的 for 循环! 
- -KCL 提供了推导表达式以及 all/any/map/filter 表达式等用于对一个集合元素进行处理,满足大部分需求,提供过程式的 for 循环体从目前场景看需求暂时不强烈,因此暂未提供过程式的 for 循环支持 - -此外,KCL 中虽然没有支持过程式的 for 循环,但是可以通过 for 循环和 lambda 函数“构造”相应的过程式 for 循环 - -```python -result = [(lambda x: int, y: int -> int { - # 在其中书写过程式的 for 循环逻辑 - z = x + y - x * 2 -})(x, y) for x in [1, 2] for y in [1, 2]] # [2, 2, 4, 4] -``` - -## 47. 默认变量不可变 - -KCL 变量不可变性是指 KCL 顶层结构中的非下划线 `_` 开头的导出变量初始化后不能被改变。 - -```python -schema Person: - name: str - age: int - -a = 1 # a会输出到YAML中,一旦赋值不可修改 -_b = 1 # _b变量以下划线开头命名,不会输出到YAML中, 可多次赋值修改 -_b = 2 -alice = Person { - name = "Alice" - age = 18 -} -``` - -规定变量不可变的方式分为两类: - -- schema 外的非下划线顶层变量 - -```python -a = 1 # 不可变导出变量 -_b = 2 # 可变非导出变量 -``` - -## 48. 如何通过编写 KCL 插件进行扩展? - -KCL 插件在 KCLVM 的 plugins 子目录(通常安装在 `$HOME/.kusion/kclvm/plugins` 目录),或者通过 `$KCL_PLUGINS_ROOT` 环境变量设置(环境变量优先级更高)。对于插件开发人员,插件都在 [Git 仓库](https://github.com/KusionStack/kcl-plugin)管理,可以将插件仓库克隆到该目录进行开发。 - -KCL 内置了 kcl-plugin 脚手架命令用于辅助用户使用 Python 语言编写 KCL 插件,以便在 KCL 文件当中调用相应的插件对 KCL 语言本身进行增强,比如访问网络,读写 IO,CMDB 查询和加密解密等功能。 - -``` -usage: kcl-plugin [-h] {list,init,info,gendoc,test} ... - -positional arguments: - {list,init,info,gendoc,test} - kcl plugin sub commands - list list all plugins - init init a new plugin - info show plugin document - gendoc gen all plugins document - test test plugin - -optional arguments: - -h, --help show this help message and exit -``` - -比如想要开发一个名为 io 插件,就可以使用如下命令成功新建一个 io 插件 - -``` -kcl-plugin init io -``` - -然后可以使用如下命令获得 plugin 的根路径并 cd 到相应的 io 插件目录进行开发 - -``` -kcl-plugin info -``` - -比如想要开发一个读文件的函数 read_file,就可以在 `$plugin_root/io` 的 `plugin.py` 中进行 python 代码编写: - -```python -# Copyright 2020 The KCL Authors. All rights reserved. - -import pathlib - -INFO = { - 'name': 'io', - 'describe': 'my io plugin description test', - 'long_describe': 'my io plugin long description test', - 'version': '0.0.1', -} - - -def read_file(file: str) -> str: - """Read string from file""" - return pathlib.Path(file).read_text() - -``` - -另外可以在 `plugin_test.py` 中编写相应的测试函数,也可以直接编写如下所示 KCL 文件进行测试: - -```python -import kcl_plugin.io - -text = io.read_file('test.txt') -``` - -还可以使用 info 命令查看 io 插件的信息 - -``` -kcl-plugin info io -``` - -``` -{ - "name": "io", - "describe": "my io plugin description test", - "long_describe": "my io plugin long description test", - "version": "0.0.1", - "method": { - "read_file": "Read string from file" - } -} -``` - -最后将编写测试完成的插件在 `kcl_plugins` 仓库提 MR 合并即可 - diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-yaml.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-yaml.md deleted file mode 100644 index a6200f7e..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/faq-yaml.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -sidebar_position: 3 ---- - -# YAML 语法 - -## 1. YAML 字符串使用单引号和双引号的区别是什么? - -- YAML 双引号字符串是唯一能够表达任意字符串的样式,通过使用 `\` 转义字符,比如使用 `\"` 转义双引号 `"`,使用 `\\` 转义反斜杠 `\`,并且可以使用单个反斜杠 `\` 作为双引号字符串的续行符 -- YAML 单引号字符串与 YAML 双引号字符串不同的是可以自由地使用 `\` 和 `"` 而不需要转义,但是使用两个单引号 `''` 转义单引号 `'` 字符 - -比如对于如下的例子,三个字符串变量的内容是相同的 - -```yaml -string1: 'here '' s to "quotes"' -string2: "here's to \"quotes\"" -string3: here's to "quotes" -``` - -因此,KCL 输出 YAML 字符串的策略是当字符串内容出现单引号时,优先输出无引号字符串或双引号字符串,其他情况输出单引号字符串以避免理解上的负担。 - -更多细节可参考: [YAML 规范 v1.2](https://yaml.org/spec/1.2.1/) - -## 2. YAML 中出现的 | - + > 等符号是什么含义? 
- -在使用 KCL 多行字符串(使用三引号括起来的字符串),输出的 YAML 经常会携带一些特殊的记号,如 `|`,`-`,`+` 和 `>` 等,这些记号通常为 YAML 多行字符串的表示方法,比如对于如下 KCL 代码: - -```python -data = """This is a KCL multi line string (the first line) -This is a KCL multi line string (the second line) -This is a KCL multi line string (the third line) - - -""" -var = 1 -``` - -输出 YAML 为: - -```yaml -data: |+ - This is a KCL multi line string (the first line) - This is a KCL multi line string (the second line) - This is a KCL multi line string (the third line) - - -var: 1 -``` - -- `|` 表示**块字符串样式**,用于表示一个多行字符串,其中的所有换行符都表示字符串真实的换行; -- `>` 表示**块折叠样式**,在其中所有的换行符将被空格替换; -- `+` 和 `-` 用于控制在字符串末尾使用换行符的情况。默认情况为字符串末尾保留单个换行符,如果要删除所有换行符,可以在样式指示符 `|` 或 `>` 后面放置一个 `-` 来完成,如果要保留末尾的换行符,则需要在 `|` 或 `>` 后面放置一个 `+` - -更多细节可参考: [YAML 多行字符串](https://yaml-multiline.info/) 和 [YAML 规范 v1.2](https://yaml.org/spec/1.2.1/) - -## 3. YAML 中在 | - + > 等符号之后出现的数字是什么含义? - -数字表示 YAML 当中的**显式缩进指示符**。对于 YAML 中的长字符串,YAML 通常第一个非空行确定字符串的缩进级别,而当第一个非空行前面具有非前导字符时,比如换行符,YAML 要求必须使用**显式缩进指示符**来指定内容的缩进级别,比如 `|2` 和 `|1` 等 - -比如对于如下 KCL 代码: - -```python -longStringStartWithEndline = """ -This is the second line -This is the third line -""" - -``` - -```yaml -longStringStartWithEndline: |2 - - This is the second line - This is the third line -``` - -如果不需要长字符串开头的空行或换行符,则可以以如下两种方式进行 KCL 长字符串书写 - -- 长字符串从第 1 行开始书写 - -```python -longString = """This is the second line -This is the third line -""" -``` - -- 使用续行符 - -```python -longString = """\ -This is the second line -This is the third line -""" -``` - -以上两种方式输出的 YAML 均为: - -```yaml -longString: | - This is the second line - This is the third line -``` - -更多细节可参考: [YAML 规范 v1.2](https://yaml.org/spec/1.2.1/) diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/install-error.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/install-error.md deleted file mode 100644 index fec49aaf..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/install-error.md +++ /dev/null @@ -1,35 +0,0 @@ -# 安装问题 - -## 1. 找不到 `libintl.dylib` - -这个问题是有些工具底层依赖了 Gettext 库,但是 macOS 默认没有这个库导致。可以尝试通过以下方式解决: - -1. (非 macOS m1 跳过该步)对于 macOS m1 操作系统,确保你有一个 homebrew arm64e-version 安装在 /opt/homebrew, 否则通过如下命令安装 arm 版本的 brew - -``` -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -# 添加到 path -export PATH=/opt/homebrew/bin:$PATH -``` - -2. `brew install gettext` -3. 确保 `/usr/local/opt/gettext/lib` 目录存在 `libintl.8.dylib` -4. 如果 brew 安装到其他目录下,可以通过拷贝等方式创建库到对应目录下 - -## 2. macOS 系统 SSL 相关错误 - -Openssl dylib 库找不到或 SSL module is not available 的问题 - -1. (非 macOS m1 跳过该步)对于 macOS m1 操作系统,确保你有一个 homebrew arm64e-version 安装在 /opt/homebrew, 否则通过如下命令安装 arm 版本的 brew - -``` -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -# 添加到 path -export PATH=/opt/homebrew/bin:$PATH -``` - -2. 
通过 brew 安装 openssl 1.1 版本 - -``` -brew install openssl@1.1 -``` diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/support.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/support.md deleted file mode 100644 index 59edb274..00000000 --- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/user_docs/support/support.md +++ /dev/null @@ -1,4 +0,0 @@ -# 常见问题 - -KusionStack 安装、使用过程中遇到的常见问题,包括基本概念解释、KCL 语法、KCL 语言设计、命令行工具和 YAML 等常见问题。 - diff --git a/i18n/zh-CN/docusaurus-theme-classic/footer.json b/i18n/zh-CN/docusaurus-theme-classic/footer.json deleted file mode 100644 index 966021aa..00000000 --- a/i18n/zh-CN/docusaurus-theme-classic/footer.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "link.title.Document": { - "message": "文档", - "description": "The title of the footer links column with title=Document in the footer" - }, - "link.title.Resource": { - "message": "资源", - "description": "The title of the footer links column with title=Resource in the footer" - }, - "link.title.More": { - "message": "更多", - "description": "The title of the footer links column with title=More in the footer" - }, - "link.item.label.Github": { - "message": "Github", - "description": "The label of footer link with label=Github linking to https://github.com/KusionStack" - }, - "link.item.label.Slack": { - "message": "Slack", - "description": "The label of footer link with label=Slack linking to https://KusionStack.slack.com" - }, - "link.item.label.Changelog": { - "message": "更新日志", - "description": "The label of footer link with label=Changelog linking to /changelog" - }, - "link.item.label.Introduction": { - "message": "简介", - "description": "The label of footer link with label=Introduction linking to https://kusionstack.io/zh-CN/docs/user_docs/intro/kusion-intro" - }, - "link.item.label.Blog": { - "message": "博客", - "description": "The label of footer link with label=Blog linking to https://kusionstack.io/zh-CN/blog" - }, - "link.item.label.FAQ": { - "message": "常见问题", - "description": "The label of footer link with label=FAQ linking to https://kusionstack.io/zh-CN/docs/user_docs/support" - }, - "copyright": { - "message": "版权 © 2022 KusionStack Authors", - "description": "The footer copyright" - } -} diff --git a/i18n/zh-CN/docusaurus-theme-classic/navbar.json b/i18n/zh-CN/docusaurus-theme-classic/navbar.json deleted file mode 100644 index b723d723..00000000 --- a/i18n/zh-CN/docusaurus-theme-classic/navbar.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "title": { - "message": "KusionStack", - "description": "The title in the navbar" - }, - "item.label.UserDoc": { - "message": "使用文档", - "description": "Navbar item with label UserDoc" - }, - "item.label.Reference": { - "message": "参考手册", - "description": "Navbar item with label Reference" - }, - "item.label.DevDoc": { - "message": "开发文档", - "description": "Navbar item with label DevDoc" - }, - "item.label.Governance": { - "message": "治理", - "description": "Navbar item with label Governance" - }, - "item.label.Blog": { - "message": "博客", - "description": "Navbar item with label Blog" - } -} diff --git a/i18n/zh/code.json b/i18n/zh/code.json new file mode 100644 index 00000000..84c5ac51 --- /dev/null +++ b/i18n/zh/code.json @@ -0,0 +1,463 @@ +{ + "Getting Started": { + "message": "Getting Started", + "description": "homepage getting started button" + }, + "Install": { + "message": "Install", + "description": "homepage install button" + }, + "home.quickstart": { + "message": "All About Your Modern Apps by Platform Engineering" + 
}, + "home.easyshipping": { + "message": "Dynamic Configuration Management" + }, + "home.easyshipping.1": { + "message": "Manage all application operations in one place, in a unified easy way" + }, + "home.easyshipping.2": { + "message": "Environment-agnostic application configurations" + }, + "home.easyshipping.3": { + "message": "Standardized and flexible platform configurations" + }, + "home.easyshipping.4": { + "message": "Kubernetes-first, lightweight and user-friendly" + }, + "home.platformengineering": { + "message": "Enable Developer Self-Service" + }, + "home.platformengineering.1": { + "message": "Fulfill the customized needs with reusable building blocks" + }, + "home.platformengineering.2": { + "message": "A growing open module ecosystem integrated with various cloud-native infrastructures" + }, + "home.platformengineering.3": { + "message": "An efficient collaboration paradigm between App Developers and Platform Engineers" + }, + "home.platformengineering.4": { + "message": "Building the golden path for end-to-end DevOps lifecycle management" + }, + "home.enterpriseops": { + "message": "Built-in Security and Compliance" + }, + "home.enterpriseops.1": { + "message": "From the first line of codes to production runtime" + }, + "home.enterpriseops.2": { + "message": "Codified shift-left validation to detect configuration risks" + }, + "home.enterpriseops.3": { + "message": "Extended check stages for workload lifecycle" + }, + "home.enterpriseops.4": { + "message": "Enterprise-grade fine-grained cluster control for Kubernetes" + }, + "home.whoisusing": { + "message": "Adopted by" + }, + "theme.ErrorPageContent.title": { + "message": "This page crashed.", + "description": "The title of the fallback page when the page crashed" + }, + "theme.NotFound.title": { + "message": "Page Not Found", + "description": "The title of the 404 page" + }, + "theme.NotFound.p1": { + "message": "We could not find what you were looking for.", + "description": "The first paragraph of the 404 page" + }, + "theme.NotFound.p2": { + "message": "Please contact the owner of the site that linked you to the original URL and let them know their link is broken.", + "description": "The 2nd paragraph of the 404 page" + }, + "theme.admonition.note": { + "message": "note", + "description": "The default label used for the Note admonition (:::note)" + }, + "theme.admonition.tip": { + "message": "tip", + "description": "The default label used for the Tip admonition (:::tip)" + }, + "theme.admonition.danger": { + "message": "danger", + "description": "The default label used for the Danger admonition (:::danger)" + }, + "theme.admonition.info": { + "message": "info", + "description": "The default label used for the Info admonition (:::info)" + }, + "theme.admonition.caution": { + "message": "caution", + "description": "The default label used for the Caution admonition (:::caution)" + }, + "theme.blog.archive.title": { + "message": "Archive", + "description": "The page & hero title of the blog archive page" + }, + "theme.blog.archive.description": { + "message": "Archive", + "description": "The page & hero description of the blog archive page" + }, + "theme.BackToTopButton.buttonAriaLabel": { + "message": "Scroll back to top", + "description": "The ARIA label for the back to top button" + }, + "theme.blog.paginator.navAriaLabel": { + "message": "Blog list page navigation", + "description": "The ARIA label for the blog pagination" + }, + "theme.blog.paginator.newerEntries": { + "message": "Newer Entries", + "description": 
"The label used to navigate to the newer blog posts page (previous page)" + }, + "theme.blog.paginator.olderEntries": { + "message": "Older Entries", + "description": "The label used to navigate to the older blog posts page (next page)" + }, + "theme.blog.post.paginator.navAriaLabel": { + "message": "Blog post page navigation", + "description": "The ARIA label for the blog posts pagination" + }, + "theme.blog.post.paginator.newerPost": { + "message": "Newer Post", + "description": "The blog post button label to navigate to the newer/previous post" + }, + "theme.blog.post.paginator.olderPost": { + "message": "Older Post", + "description": "The blog post button label to navigate to the older/next post" + }, + "theme.blog.post.plurals": { + "message": "One post|{count} posts", + "description": "Pluralized label for \"{count} posts\". Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.blog.tagTitle": { + "message": "{nPosts} tagged with \"{tagName}\"", + "description": "The title of the page for a blog tag" + }, + "theme.tags.tagsPageLink": { + "message": "View All Tags", + "description": "The label of the link targeting the tag list page" + }, + "theme.colorToggle.ariaLabel": { + "message": "Switch between dark and light mode (currently {mode})", + "description": "The ARIA label for the navbar color mode toggle" + }, + "theme.colorToggle.ariaLabel.mode.dark": { + "message": "dark mode", + "description": "The name for the dark color mode" + }, + "theme.colorToggle.ariaLabel.mode.light": { + "message": "light mode", + "description": "The name for the light color mode" + }, + "theme.docs.breadcrumbs.navAriaLabel": { + "message": "Breadcrumbs", + "description": "The ARIA label for the breadcrumbs" + }, + "theme.docs.DocCard.categoryDescription": { + "message": "{count} items", + "description": "The default description for a category card in the generated index about how many items this category includes" + }, + "theme.docs.paginator.navAriaLabel": { + "message": "Docs pages", + "description": "The ARIA label for the docs pagination" + }, + "theme.docs.paginator.previous": { + "message": "Previous", + "description": "The label used to navigate to the previous doc" + }, + "theme.docs.paginator.next": { + "message": "Next", + "description": "The label used to navigate to the next doc" + }, + "theme.docs.tagDocListPageTitle.nDocsTagged": { + "message": "One doc tagged|{count} docs tagged", + "description": "Pluralized label for \"{count} docs tagged\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.docs.tagDocListPageTitle": { + "message": "{nDocsTagged} with \"{tagName}\"", + "description": "The title of the page for a docs tag" + }, + "theme.docs.versionBadge.label": { + "message": "Version: {versionLabel}" + }, + "theme.docs.versions.unreleasedVersionLabel": { + "message": "This is unreleased documentation for {siteTitle} {versionLabel} version.", + "description": "The label used to tell the user that he's browsing an unreleased doc version" + }, + "theme.docs.versions.unmaintainedVersionLabel": { + "message": "This is documentation for {siteTitle} {versionLabel}, which is no longer actively maintained.", + "description": "The label used to tell the user that he's browsing an unmaintained doc version" + }, + "theme.docs.versions.latestVersionSuggestionLabel": { + "message": "For up-to-date documentation, see the {latestVersionLink} ({versionLabel}).", + "description": "The label used to tell the user to check the latest version" + }, + "theme.docs.versions.latestVersionLinkLabel": { + "message": "latest version", + "description": "The label used for the latest version suggestion link label" + }, + "theme.common.editThisPage": { + "message": "Edit this page", + "description": "The link label to edit the current page" + }, + "theme.common.headingLinkTitle": { + "message": "Direct link to {heading}", + "description": "Title for link to heading" + }, + "theme.lastUpdated.atDate": { + "message": " on {date}", + "description": "The words used to describe on which date a page has been last updated" + }, + "theme.lastUpdated.byUser": { + "message": " by {user}", + "description": "The words used to describe by who the page has been last updated" + }, + "theme.lastUpdated.lastUpdatedAtBy": { + "message": "Last updated{atDate}{byUser}", + "description": "The sentence used to display when a page has been last updated, and by who" + }, + "theme.navbar.mobileVersionsDropdown.label": { + "message": "Versions", + "description": "The label for the navbar versions dropdown on mobile view" + }, + "theme.tags.tagsListLabel": { + "message": "Tags:", + "description": "The label alongside a tag list" + }, + "theme.AnnouncementBar.closeButtonAriaLabel": { + "message": "Close", + "description": "The ARIA label for close button of announcement bar" + }, + "theme.blog.sidebar.navAriaLabel": { + "message": "Blog recent posts navigation", + "description": "The ARIA label for recent posts in the blog sidebar" + }, + "theme.CodeBlock.copied": { + "message": "Copied", + "description": "The copied button label on code blocks" + }, + "theme.CodeBlock.copyButtonAriaLabel": { + "message": "Copy code to clipboard", + "description": "The ARIA label for copy code blocks button" + }, + "theme.CodeBlock.copy": { + "message": "Copy", + "description": "The copy button label on code blocks" + }, + "theme.CodeBlock.wordWrapToggle": { + "message": "Toggle word wrap", + "description": "The title attribute for toggle word wrapping button of code block lines" + }, + "theme.DocSidebarItem.toggleCollapsedCategoryAriaLabel": { + "message": "Toggle the collapsible sidebar category '{label}'", + "description": "The ARIA label to toggle the collapsible sidebar category" + }, + "theme.NavBar.navAriaLabel": { + "message": "Main", + "description": "The ARIA label for the main navigation" + }, + "theme.TOCCollapsible.toggleButtonLabel": { + "message": "On this 
page", + "description": "The label used by the button on the collapsible TOC component" + }, + "theme.blog.post.readMore": { + "message": "Read More", + "description": "The label used in blog post item excerpts to link to full blog posts" + }, + "theme.blog.post.readMoreLabel": { + "message": "Read more about {title}", + "description": "The ARIA label for the link to full blog posts from excerpts" + }, + "theme.blog.post.readingTime.plurals": { + "message": "One min read|{readingTime} min read", + "description": "Pluralized label for \"{readingTime} min read\". Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.docs.breadcrumbs.home": { + "message": "Home page", + "description": "The ARIA label for the home page in the breadcrumbs" + }, + "theme.navbar.mobileLanguageDropdown.label": { + "message": "Languages", + "description": "The label for the mobile language switcher dropdown" + }, + "theme.docs.sidebar.collapseButtonTitle": { + "message": "Collapse sidebar", + "description": "The title attribute for collapse button of doc sidebar" + }, + "theme.docs.sidebar.collapseButtonAriaLabel": { + "message": "Collapse sidebar", + "description": "The title attribute for collapse button of doc sidebar" + }, + "theme.docs.sidebar.navAriaLabel": { + "message": "Docs sidebar", + "description": "The ARIA label for the sidebar navigation" + }, + "theme.docs.sidebar.closeSidebarButtonAriaLabel": { + "message": "Close navigation bar", + "description": "The ARIA label for close button of mobile sidebar" + }, + "theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel": { + "message": "← Back to main menu", + "description": "The label of the back button to return to main menu, inside the mobile navbar sidebar secondary menu (notably used to display the docs sidebar)" + }, + "theme.docs.sidebar.toggleSidebarButtonAriaLabel": { + "message": "Toggle navigation bar", + "description": "The ARIA label for hamburger menu button of mobile navigation" + }, + "theme.docs.sidebar.expandButtonTitle": { + "message": "Expand sidebar", + "description": "The ARIA label and title attribute for expand button of doc sidebar" + }, + "theme.docs.sidebar.expandButtonAriaLabel": { + "message": "Expand sidebar", + "description": "The ARIA label and title attribute for expand button of doc sidebar" + }, + "theme.SearchPage.documentsFound.plurals": { + "message": "One document found|{count} documents found", + "description": "Pluralized label for \"{count} documents found\". 
Use as much plural forms (separated by \"|\") as your language support (see https://www.unicode.org/cldr/cldr-aux/charts/34/supplemental/language_plural_rules.html)" + }, + "theme.SearchPage.existingResultsTitle": { + "message": "Search results for \"{query}\"", + "description": "The search page title for non-empty query" + }, + "theme.SearchPage.emptyResultsTitle": { + "message": "Search the documentation", + "description": "The search page title for empty query" + }, + "theme.SearchPage.inputPlaceholder": { + "message": "Type your search here", + "description": "The placeholder for search page input" + }, + "theme.SearchPage.inputLabel": { + "message": "Search", + "description": "The ARIA label for search page input" + }, + "theme.SearchPage.algoliaLabel": { + "message": "Search by Algolia", + "description": "The ARIA label for Algolia mention" + }, + "theme.SearchPage.noResultsText": { + "message": "No results were found", + "description": "The paragraph for empty search result" + }, + "theme.SearchPage.fetchingNewResults": { + "message": "Fetching new results...", + "description": "The paragraph for fetching new search results" + }, + "theme.SearchBar.label": { + "message": "Search", + "description": "The ARIA label and placeholder for search button" + }, + "theme.SearchModal.searchBox.resetButtonTitle": { + "message": "Clear the query", + "description": "The label and ARIA label for search box reset button" + }, + "theme.SearchModal.searchBox.cancelButtonText": { + "message": "Cancel", + "description": "The label and ARIA label for search box cancel button" + }, + "theme.SearchModal.startScreen.recentSearchesTitle": { + "message": "Recent", + "description": "The title for recent searches" + }, + "theme.SearchModal.startScreen.noRecentSearchesText": { + "message": "No recent searches", + "description": "The text when no recent searches" + }, + "theme.SearchModal.startScreen.saveRecentSearchButtonTitle": { + "message": "Save this search", + "description": "The label for save recent search button" + }, + "theme.SearchModal.startScreen.removeRecentSearchButtonTitle": { + "message": "Remove this search from history", + "description": "The label for remove recent search button" + }, + "theme.SearchModal.startScreen.favoriteSearchesTitle": { + "message": "Favorite", + "description": "The title for favorite searches" + }, + "theme.SearchModal.startScreen.removeFavoriteSearchButtonTitle": { + "message": "Remove this search from favorites", + "description": "The label for remove favorite search button" + }, + "theme.SearchModal.errorScreen.titleText": { + "message": "Unable to fetch results", + "description": "The title for error screen of search modal" + }, + "theme.SearchModal.errorScreen.helpText": { + "message": "You might want to check your network connection.", + "description": "The help text for error screen of search modal" + }, + "theme.SearchModal.footer.selectText": { + "message": "to select", + "description": "The explanatory text of the action for the enter key" + }, + "theme.SearchModal.footer.selectKeyAriaLabel": { + "message": "Enter key", + "description": "The ARIA label for the Enter key button that makes the selection" + }, + "theme.SearchModal.footer.navigateText": { + "message": "to navigate", + "description": "The explanatory text of the action for the Arrow up and Arrow down key" + }, + "theme.SearchModal.footer.navigateUpKeyAriaLabel": { + "message": "Arrow up", + "description": "The ARIA label for the Arrow up key button that makes the navigation" + }, + 
"theme.SearchModal.footer.navigateDownKeyAriaLabel": { + "message": "Arrow down", + "description": "The ARIA label for the Arrow down key button that makes the navigation" + }, + "theme.SearchModal.footer.closeText": { + "message": "to close", + "description": "The explanatory text of the action for Escape key" + }, + "theme.SearchModal.footer.closeKeyAriaLabel": { + "message": "Escape key", + "description": "The ARIA label for the Escape key button that close the modal" + }, + "theme.SearchModal.footer.searchByText": { + "message": "Search by", + "description": "The text explain that the search is making by Algolia" + }, + "theme.SearchModal.noResultsScreen.noResultsText": { + "message": "No results for", + "description": "The text explains that there are no results for the following search" + }, + "theme.SearchModal.noResultsScreen.suggestedQueryText": { + "message": "Try searching for", + "description": "The text for the suggested query when no results are found for the following search" + }, + "theme.SearchModal.noResultsScreen.reportMissingResultsText": { + "message": "Believe this query should return results?", + "description": "The text for the question where the user thinks there are missing results" + }, + "theme.SearchModal.noResultsScreen.reportMissingResultsLinkText": { + "message": "Let us know.", + "description": "The text for the link to report missing results" + }, + "theme.SearchModal.placeholder": { + "message": "Search docs", + "description": "The placeholder of the input of the DocSearch pop-up modal" + }, + "theme.SearchBar.seeAll": { + "message": "See all {count} results" + }, + "theme.ErrorPageContent.tryAgain": { + "message": "Try again", + "description": "The label of the button to try again rendering when the React error boundary captures an error" + }, + "theme.common.skipToMainContent": { + "message": "Skip to main content", + "description": "The skip to content label used for accessibility, allowing to rapidly navigate to main content with keyboard tab/enter navigation" + }, + "theme.tags.tagsPageTitle": { + "message": "Tags", + "description": "The title of the tag list page" + } +} diff --git a/i18n/zh-CN/docusaurus-plugin-content-blog/options.json b/i18n/zh/docusaurus-plugin-content-blog/options.json similarity index 100% rename from i18n/zh-CN/docusaurus-plugin-content-blog/options.json rename to i18n/zh/docusaurus-plugin-content-blog/options.json diff --git a/i18n/zh/docusaurus-plugin-content-docs-community/current.json b/i18n/zh/docusaurus-plugin-content-docs-community/current.json new file mode 100644 index 00000000..dd30528d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-community/current.json @@ -0,0 +1,6 @@ +{ + "version.label": { + "message": "Next", + "description": "The label for version current" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-ctrlmesh/current.json b/i18n/zh/docusaurus-plugin-content-docs-ctrlmesh/current.json new file mode 100644 index 00000000..7e461960 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-ctrlmesh/current.json @@ -0,0 +1,10 @@ +{ + "version.label": { + "message": "v0.2 🚧", + "description": "The label for version current" + }, + "sidebar.ctrlmesh.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar ctrlmesh" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-ctrlmesh/version-v0.1.json b/i18n/zh/docusaurus-plugin-content-docs-ctrlmesh/version-v0.1.json new file mode 100644 index 00000000..08190601 --- 
/dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-ctrlmesh/version-v0.1.json @@ -0,0 +1,10 @@ +{ + "version.label": { + "message": "v0.1", + "description": "The label for version v0.1" + }, + "sidebar.ctrlmesh.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar ctrlmesh" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-docs/current.json b/i18n/zh/docusaurus-plugin-content-docs-docs/current.json new file mode 100644 index 00000000..c56cec03 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-docs/current.json @@ -0,0 +1,102 @@ +{ + "version.label": { + "message": "v0.13 🚧", + "description": "The label for version current" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Project": { + "message": "Project", + "description": "The label for category Project in sidebar kusion" + }, + "sidebar.kusion.category.Stack": { + "message": "Stack", + "description": "The label for category Stack in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Module": { + "message": "Kusion Module", + "description": "The label for category Kusion Module in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + "sidebar.kusion.category.Developer Schemas": { + "message": "Developer Schemas", + "description": "The label for category Developer Schemas in sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + 
"message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + "description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.10.json b/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.10.json new file mode 100644 index 00000000..4ff8b1cc --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.10.json @@ -0,0 +1,110 @@ +{ + "version.label": { + "message": "v0.10", + "description": "The label for version v0.10" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Project": { + "message": "Project", + "description": "The label for category Project in sidebar kusion" + }, + "sidebar.kusion.category.Stack": { + "message": "Stack", + "description": "The label for category Stack in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.GitHub Actions": { + "message": "GitHub Actions", + "description": "The label for category GitHub Actions in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + 
"sidebar.kusion.category.Catalog Models": { + "message": "Catalog Models", + "description": "The label for category Catalog Models in sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.network": { + "message": "network", + "description": "The label for category network in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + "description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.trait": { + "message": "trait", + "description": "The label for category trait in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.11.json b/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.11.json new file mode 100644 index 00000000..7d892af9 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.11.json @@ -0,0 +1,102 @@ +{ + "version.label": { + "message": "v0.11", + "description": "The label for version v0.11" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? 
in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Project": { + "message": "Project", + "description": "The label for category Project in sidebar kusion" + }, + "sidebar.kusion.category.Stack": { + "message": "Stack", + "description": "The label for category Stack in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Module": { + "message": "Kusion Module", + "description": "The label for category Kusion Module in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.Secrets Management": { + "message": "Secrets Management", + "description": "The label for category Secrets Management in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Modules": { + "message": "Kusion Modules", + "description": "The label for category Kusion Modules in sidebar kusion" + }, + "sidebar.kusion.category.Developer Schemas": { + "message": "Developer Schemas", + "description": "The label for category Developer Schemas in sidebar kusion" + }, + "sidebar.kusion.category.database": { + "message": "database", + "description": "The label for category database in sidebar kusion" + }, + "sidebar.kusion.category.internal": { + "message": "internal", + "description": "The label for category internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + "description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.monitoring": { + "message": "monitoring", + "description": "The label for category monitoring in sidebar kusion" + }, + "sidebar.kusion.category.workload": { + "message": "workload", + "description": "The label for category workload in sidebar kusion" + }, + "sidebar.kusion.category.Workspace Configs": { + "message": "Workspace Configs", + "description": "The label for category Workspace Configs in sidebar kusion" + }, + "sidebar.kusion.category.networking": { + "message": "networking", + "description": "The label for category networking in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git 
a/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.9.json b/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.9.json new file mode 100644 index 00000000..b0fcd5d3 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-docs/version-v0.9.json @@ -0,0 +1,110 @@ +{ + "version.label": { + "message": "v0.9", + "description": "The label for version v0.9" + }, + "sidebar.kusion.category.What is Kusion?": { + "message": "What is Kusion?", + "description": "The label for category What is Kusion? in sidebar kusion" + }, + "sidebar.kusion.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kusion" + }, + "sidebar.kusion.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kusion" + }, + "sidebar.kusion.category.Configuration Walkthrough": { + "message": "Configuration Walkthrough", + "description": "The label for category Configuration Walkthrough in sidebar kusion" + }, + "sidebar.kusion.category.User Guide": { + "message": "User Guide", + "description": "The label for category User Guide in sidebar kusion" + }, + "sidebar.kusion.category.Cloud Resources": { + "message": "Cloud Resources", + "description": "The label for category Cloud Resources in sidebar kusion" + }, + "sidebar.kusion.category.Kubernetes": { + "message": "Kubernetes", + "description": "The label for category Kubernetes in sidebar kusion" + }, + "sidebar.kusion.category.Automated Observability": { + "message": "Automated Observability", + "description": "The label for category Automated Observability in sidebar kusion" + }, + "sidebar.kusion.category.GitHub Actions": { + "message": "GitHub Actions", + "description": "The label for category GitHub Actions in sidebar kusion" + }, + "sidebar.kusion.category.Reference": { + "message": "Reference", + "description": "The label for category Reference in sidebar kusion" + }, + "sidebar.kusion.category.Command Line Tools": { + "message": "Command Line Tools", + "description": "The label for category Command Line Tools in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Commands": { + "message": "Kusion Commands", + "description": "The label for category Kusion Commands in sidebar kusion" + }, + "sidebar.kusion.category.Backend Configuration": { + "message": "Backend Configuration", + "description": "The label for category Backend Configuration in sidebar kusion" + }, + "sidebar.kusion.category.Kusion Model Library": { + "message": "Kusion Model Library", + "description": "The label for category Kusion Model Library in sidebar kusion" + }, + "sidebar.kusion.category.Catalog Models": { + "message": "Catalog Models", + "description": "The label for category Catalog Models in sidebar kusion" + }, + "sidebar.kusion.category.Workload": { + "message": "Workload", + "description": "The label for category Workload in sidebar kusion" + }, + "sidebar.kusion.category.Database": { + "message": "Database", + "description": "The label for category Database in sidebar kusion" + }, + "sidebar.kusion.category.Monitoring": { + "message": "Monitoring", + "description": "The label for category Monitoring in sidebar kusion" + }, + "sidebar.kusion.category.Trait": { + "message": "Trait", + "description": "The label for category Trait in sidebar kusion" + }, + "sidebar.kusion.category.Internal": { + "message": "Internal", + "description": "The label for category Internal in sidebar kusion" + }, + "sidebar.kusion.category.container": { + "message": "container", + 
"description": "The label for category container in sidebar kusion" + }, + "sidebar.kusion.category.lifecycle": { + "message": "lifecycle", + "description": "The label for category lifecycle in sidebar kusion" + }, + "sidebar.kusion.category.probe": { + "message": "probe", + "description": "The label for category probe in sidebar kusion" + }, + "sidebar.kusion.category.network": { + "message": "network", + "description": "The label for category network in sidebar kusion" + }, + "sidebar.kusion.category.secret": { + "message": "secret", + "description": "The label for category secret in sidebar kusion" + }, + "sidebar.kusion.category.FAQ": { + "message": "FAQ", + "description": "The label for category FAQ in sidebar kusion" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current.json new file mode 100644 index 00000000..53a259c7 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current.json @@ -0,0 +1,46 @@ +{ + "version.label": { + "message": "v0.6 🚧", + "description": "The label for version current" + }, + "sidebar.karpor.category.Getting Started": { + "message": "入门", + "description": "The label for category Getting Started in sidebar karpor" + }, + "sidebar.karpor.category.Concepts": { + "message": "概念", + "description": "The label for category Concepts in sidebar karpor" + }, + "sidebar.karpor.category.User Guide": { + "message": "用户手册", + "description": "The label for category User Guide in sidebar karpor" + }, + "sidebar.karpor.category.How to Insight": { + "message": "如何洞察", + "description": "The label for category How to Insight in sidebar karpor" + }, + "sidebar.karpor.category.Best Production Practices": { + "message": "生产最佳实践", + "description": "The label for category Best Production Practices in sidebar karpor" + }, + "sidebar.karpor.category.Developer Guide": { + "message": "开发者手册", + "description": "The label for category Developer Guide in sidebar karpor" + }, + "sidebar.karpor.category.Contribution Guide": { + "message": "贡献指南", + "description": "The label for category Contribution Guide in sidebar karpor" + }, + "sidebar.karpor.category.Conventions": { + "message": "规约", + "description": "The label for category Conventions in sidebar karpor" + }, + "sidebar.karpor.category.References": { + "message": "参考手册", + "description": "The label for category References in sidebar karpor" + }, + "sidebar.karpor.category.CLI Commands": { + "message": "CLI Commands", + "description": "The label for category CLI Commands in sidebar karpor" + } +} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/1-overview.mdx b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/1-overview.mdx new file mode 100644 index 00000000..665ed4f3 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/1-overview.mdx @@ -0,0 +1,419 @@ +--- +id: overview +title: 概览 +slug: / +--- + +import { + AiOutlineArrowRight, + AiFillCheckCircle, + AiFillCloseCircle, +} from "react-icons/ai"; +import logoImg from "@site/static/karpor/assets/logo/logo-full.png"; +import searchImg from "@site/static/karpor/assets/overview/search.png"; +import insightImg from "@site/static/karpor/assets/overview/insight.png"; +import visionImg from "@site/static/karpor/assets/overview/vision.png"; +import comingSoonImg from "@site/static/karpor/assets/misc/coming-soon.jpeg"; +import KarporButton from 
"@site/src/components/KarporButton"; +import GithubStar from "@site/src/components/GithubStars"; +import ReactPlayer from "react-player"; +import Typed from "typed.js"; + +export const FeatureBlock = ({ + title, + reverse = false, + imgSrc, + imgAlt, + children, +}) => { + const isMobile = typeof window !== "undefined" && window.innerWidth <= 768; + return ( + <> +

{title}

+
+
+ {imgAlt} +
+
{children}
+
+ + ); +}; + +export const Content = () => { + const karporVsOthers = [ + { + label: "用户界面", + karpor: true, + kubernetesDashboard: true, + labelDesc: "", + }, + { + label: "多集群", + karpor: true, + kubernetesDashboard: false, + labelDesc: "能够同时连接到多个集群", + }, + { + label: "聚合资源视图", + karpor: true, + kubernetesDashboard: false, + labelDesc: "人类友好的资源视图", + }, + { + label: "安全合规", + karpor: true, + kubernetesDashboard: false, + labelDesc: "自动扫描风险,评估健康分", + }, + { + label: "资源关系拓扑", + karpor: true, + kubernetesDashboard: false, + labelDesc: "洞察资源的上下文关系", + }, + ]; + const h2Style = { + paddingBottom: "14px", + borderBottom: "2px solid #f1f1f1", + fontSize: 28, + }; + const flexDirectionStyle = { + display: "flex", + flexDirection: "column", + alignItems: "center", + }; + // Setup typed animation + const el = React.useRef(null); + React.useEffect(() => { + const typed = new Typed(el.current, { + strings: [ + "帮助开发者快速定位资源", + "帮助管理员深入洞察集群", + "帮助平台和多集群建立连接", + ], + typeSpeed: 40, + backDelay: 1500, + loop: true, + }); + return () => { + // Destroy Typed instance during cleanup to stop animation + typed.destroy(); + }; + }, []); + return ( + <> +
+
+ +
+
+
+ +
+
+ +
+
+
+ Intelligence for Kubernetes ✨ +
+
+ +
+
+
+
+
+

📖 Karpor 是什么?

+
+ Karpor 是智能化的 Kubernetes 平台,它为 Kubernetes + 带来了高级的 🔍 搜索、💡 洞察和 ✨ AI 功能,本质上是一个 + Kubernetes 可视化工具。通过 + Karpor,您可以在任何云平台上获得对 Kubernetes + 集群的关键可见性。 +
+
+ 我们立志成为一个 + + 小而美、厂商中立、开发者友好、社区驱动 + + 的开源项目! 🚀 +
+
+
+
+ +
+
+
+
+

💡 为什么选择 Karpor?

+
+ +
+ ⚡️ 自动同步 +
+ 自动同步您在多云平台管理的任何集群中的资源 +
+
+ 🔍 强大灵活的查询 +
+ 以快速简单的方式有效地检索和定位跨集群的资源 +
+
+
+ +
+ 🔒 安全合规 +
+ 了解您在多个集群和合规标准中的合规性状态 +
+
+ 📊 资源拓扑 +
+ 提供包含资源运行上下文信息的关系拓扑和逻辑视图 +
+
+ 📉 成本优化 +
+ 即将推出 +
+
+
+ +
+ 💬 自然语言操作 +
+ 使用自然语言与 Kubernetes 交互,实现更直观的操作 +
+
+ 📦 情境化 AI 响应 +
+ 获得智能的、情境化的辅助,满足您的需求 +
+
+ 🤖 Kubernetes AIOps +
+ 利用 AI 驱动的洞察,自动化和优化 Kubernetes 管理 +
+
+
+
+
+
+
+

🌈 Our Vision

+
+ 现如今,Kubernetes + 生态系统日益复杂是一个不可否认的趋势,这一趋势越来越难以驾驭。这种复杂性不仅增加了运维的难度,也降低了用户采纳新技术的速度,从而限制了他们充分利用 + Kubernetes 的潜力。 +
+
+ 我们希望 Karpor 围绕着 🔍 搜索、📊 洞察和✨AI,击穿 + Kubernetes 愈演愈烈的复杂性,达成以下价值主张: +
+
+
+ +
+
+
+

🙌 Karpor vs. Kubernetes Dashboard

+
+ + {karporVsOthers?.map((item) => { + return ( +
+
+
{item?.label}
+ {item?.labelDesc && ( +
{item?.labelDesc}
+ )} +
+
+ {item?.karpor ? ( + + ) : ( + + )} +
+
+ {item?.kubernetesDashboard ? ( + + ) : ( + + )} +
+
+ ); + })} +
+

🎖️ 项目贡献者

+
+

感谢这些了不起的人! 🍻

+

+ 查看{" "} + 贡献指南, + 欢迎加入我们! 👇 +

+ +
+

👉 下一步

+
+ +
+ + ); +}; + + + + diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md new file mode 100644 index 00000000..86e85980 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md @@ -0,0 +1,172 @@ +--- +title: 安装 +--- + +## 使用 Helm 安装 + +如果您拥有 Kubernetes 集群,Helm 是推荐的安装方法。 + +以下教程将指导您使用 Helm 安装 Karpor,这将在命名空间 `karpor` 中以 `karpor-release` 为 Release 名称安装 Chart。 + +### 先决条件 + +* Helm v3+ +* Kubernetes 集群(最简单的方法是使用 `kind` 或 `minikube` 在本地部署 Kubernetes 集群) + +### 远程安装 + +首先,将 karpor chart 仓库添加到您的本地仓库。 + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +``` + +然后,您可以使用以下命令安装 Karpor 的最新版本。 + +```shell +helm install karpor-release kusionstack/karpor +``` + +![安装](./assets/2-installation/install.gif) + +**注意**:直接安装此 Chart 意味着它将使用 Karpor 的 [默认模板值](https://github.com/KusionStack/charts/blob/master/charts/karpor/values.yaml)。 + +如果将其部署到生产集群中,或者您想要自定义 Chart 配置,如 `resources`、`replicas`、`port` 等,您可以通过 `--set` 参数覆盖默认值。 + +Karpor Chart 的所有可配置参数都详细说明在 [这里](#chart-参数)。 + +```shell +helm install karpor-release kusionstack/karpor --set server.replicas=3 --set syncer.port=7654 +``` + +### 查看所有可用版本 + +您可以使用以下命令查看所有可安装的 Karpor Chart 版本。 + +```shell +helm repo update +helm search repo kusionstack/karpor --versions +``` + +### 升级到指定版本 + +您可以通过 `--version` 指定要升级的版本。 + +```shell +# 升级到最新版本 +helm upgrade karpor-release kusionstack/karpor + +# 升级到指定版本 +helm upgrade karpor-release kusionstack/karpor --version 1.2.3 +``` + +### 本地安装 + +如果您在生产中连接 [https://kusionstack.github.io/charts/](https://kusionstack.github.io/charts/) 有问题,您可能需要从 [这里](https://github.com/KusionStack/charts) 手动下载 Chart,并在本地使用它来安装或升级 Karpor 版本。 + +```shell +git clone https://github.com/KusionStack/charts.git +helm install karpor-release charts/karpor +helm upgrade karpor-release charts/karpor +``` + +### 卸载 + +卸载/删除命名空间 `karpor` 中的 `karpor-release` Helm Release: + +```shell +helm uninstall karpor-release +``` + +### 中国镜像代理 + +如果你在中国、并且从官方 DockerHub 上拉取镜像时遇到困难,那么你可以使用第三方的镜像代理服务: + +```shell +helm install karpor-release kusionstack/karpor --set registryProxy=docker.m.daocloud.io +``` + +**注意**: 以上只是一个样例,你可以根据需要替换 `registryProxy` 的值。 + +### Chart 参数 + +以下表格列出了 Chart 的所有可配置参数及其默认值。 + +#### 通用参数 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| namespace | string | `"karpor"` | 部署的目标命名空间 | +| namespaceEnabled | bool | `true` | 是否生成命名空间 | +| registryProxy | string | `""` | 镜像代理地址,配置后将作为所有组件镜像的前缀。 比如,`golang:latest` 将替换为 `/golang:latest` | + +#### 全局参数 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| global.image.imagePullPolicy | string | `"IfNotPresent"` | 应用于所有 Karpor 组件的镜像拉取策略 | + +#### Karpor Server + +Karpor Server 组件是主要的后端服务。它本身就是一个 `apiserver`,也提供 `/rest-api` 来服务 Web UI + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| server.image.repo | string | `"kusionstack/karpor"` | Karpor Server 镜像的仓库 | +| server.image.tag | string | `""` | Karpor Server 镜像的标签。如果未指定,则默认为 Chart 的 appVersion | +| server.name | string | `"karpor-server"` | Karpor Server 的组件名称 | +| server.port | int | `7443` | Karpor Server 的端口 | +| server.replicas | int | `1` | 要运行的 Karpor Server pod 的数量 | +| server.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Karpor Server pod 的资源规格 | +| 
server.serviceType | string | `"ClusterIP"` | Karpor Server 的服务类型,可用的值为 ["ClusterIP"、"NodePort"、"LoadBalancer"] | + +#### Karpor Syncer + +Karpor Syncer 组件是独立的服务,用于实时同步集群资源。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| syncer.image.repo | string | `"kusionstack/karpor"` | Karpor Syncer 镜像的仓库 | +| syncer.image.tag | string | `""` | Karpor Syncer 镜像的标签。如果未指定,则默认为 Chart 的 appVersion | +| syncer.name | string | `"karpor-syncer"` | karpor Syncer 的组件名称 | +| syncer.port | int | `7443` | karpor Syncer 的端口 | +| syncer.replicas | int | `1` | 要运行的 karpor Syncer pod 的数量 | +| syncer.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | karpor Syncer pod 的资源规格 | + +#### ElasticSearch + +ElasticSearch 组件用于存储同步的资源和用户数据。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| elasticsearch.image.repo | string | `"docker.elastic.co/elasticsearch/elasticsearch"` | ElasticSearch 镜像的仓库 | +| elasticsearch.image.tag | string | `"8.6.2"` | ElasticSearch 镜像的特定标签 | +| elasticsearch.name | string | `"elasticsearch"` | ElasticSearch 的组件名称 | +| elasticsearch.port | int | `9200` | ElasticSearch 的端口 | +| elasticsearch.replicas | int | `1` | 要运行的 ElasticSearch pod 的数量 | +| elasticsearch.resources | object | `{"limits":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"},"requests":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"}}` | karpor elasticsearch pod 的资源规格 | + +#### ETCD + +ETCD 组件是 Karpor Server 作为 `apiserver` 背后的存储。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| etcd.image.repo | string | `"quay.io/coreos/etcd"` | ETCD 镜像的仓库 | +| etcd.image.tag | string | `"v3.5.11"` | ETCD 镜像的标签 | +| etcd.name | string | `"etcd"` | ETCD 的组件名称 | +| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | | +| etcd.persistence.size | string | `"10Gi"` | | +| etcd.port | int | `2379` | ETCD 的端口 | +| etcd.replicas | int | `1` | 要运行的 etcd pod 的数量 | +| etcd.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | karpor etcd pod 的资源规格 | + +#### Job + +这是一个一次性 Kubernetes Job,用于生成根证书和一些前置工作。Karpor Server 和 Karpor Syncer 都需要依赖它完成才能正常启动。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| job.image.repo | string | `"kusionstack/karpor"` | Job 镜像的仓库 | +| job.image.tag | string | `""` | Karpor 镜像的标签。如果未指定,则默认为 Chart 的 appVersion | diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/3-quick-start.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/3-quick-start.md new file mode 100644 index 00000000..6d1d3afe --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/3-quick-start.md @@ -0,0 +1,143 @@ +--- +title: 快速开始 +--- +## 前提条件 + +* 确保已安装 [kubectl](https://kubernetes.io/docs/tasks/tools/)。 +* 确保已安装 [helm](https://helm.sh/docs/intro/install/)。 +* 如果你没有现成的集群,你仍然需要一个 [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation/)。 + +## 创建集群(可选) + +首先,如果你没有现成的集群,可以使用 `kind` 工具在本地环境中创建一个 Kubernetes 集群。按照以下步骤操作: + +1. 创建集群。你可以使用以下命令创建名为 `demo-cluster` 的集群: + ```shell + kind create cluster --name demo-cluster + ``` + + 这将在你的本地 Docker 环境中创建一个新的 Kubernetes 集群。稍等片刻,直到集群创建完成。 +2. 
通过执行以下命令验证集群是否正常运行: + ```shell + kubectl cluster-info + ``` + + 如果一切设置正确,你将看到你的 Kubernetes 集群信息。 + +## 安装 + +要安装 Karpor,请在终端中执行以下命令: + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +更多的安装详情,请参考 [安装文档](2-installation.md)。 + +![安装](./assets/2-installation/install.gif) + +## 访问 Karpor Web 界面 + +1. 运行以下命令来访问运行在集群中的 Karpor 服务: + ```shell + kubectl -n karpor port-forward service/karpor-server 7443:7443 + ``` + + 执行这条命令后,如果你访问本地机器上的 7443 端口,流量会被转发到 Kubernetes 集群中 karpor-server 服务的 7443 端口。 +2. 打开浏览器并输入以下 URL: + ```shell + https://127.0.0.1:7443 + ``` + +这将打开 Karpor 的 Web 界面。👇 + +![在浏览器中打开](./assets/2-installation/open-in-browser.gif) + +祝贺你!🎉 你已成功安装 Karpor。现在你可以开始使用 Karpor 探索和洞察多集群中的资源。 + +## 创建访问令牌 + +在注册集群之前,你需要创建一个访问令牌来登录 Karpor Web 界面。以下是创建令牌的简要步骤: + +1. 导出 Hub Cluster 的 KubeConfig: + +```shell +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +2. 创建 ServiceAccount 和 ClusterRoleBinding: + +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +``` + +3. 创建令牌: + +```shell +kubectl create token karpor-admin --duration=1000h +``` + +复制生成的令牌,稍后将用于登录 Karpor Web 界面。 + +有关创建令牌的详细说明,请参阅 [如何创建 Token](../3-user-guide/1-how-to-create-token.md) 文档。 + +## 注册集群 + +要向 Karpor 注册新集群,请按照以下步骤操作: + +1. 使用上一步创建的令牌登录 Karpor Web 界面。 +2. 打开 Karpor Web 界面中的 集群管理 部分。 +3. 点击 接入集群 按钮。 +4. 按照界面上的说明完成集群注册过程。 + +5. 在注册集群时,请注意以下事项: + + - 集群名称必须唯一且一旦创建不能更改。 + - 确保上传的集群证书中的 server 地址(目标集群地址)与 Karpor 之间有网络连通性。 + - 如果你在本地集群中部署了 Karpor,并希望注册该本地集群,则需要将集群证书中的 server 地址修改为集群内部地址 `https://kubernetes.default.svc.cluster.local:443`,以确保 Karpor 能够直接访问目标集群。 + - 如果要注册 EKS 集群,需要对 KubeConfig 进行额外的配置,包括添加 `env`、`interactiveMode` 和 `provideClusterInfo` 字段。详细步骤请参考 [多集群管理](../3-user-guide/2-multi-cluster-management.md) 文档中的 "注册 EKS 集群" 部分。 + +6. 完成上述步骤后,点击 验证并提交 按钮。 + +以下是 `注册集群` 页面的示例: + +![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) + +有关注册过程的更详细解释,请参阅 [多集群管理](../3-user-guide/2-multi-cluster-management.md) 指南。 + +## 搜索资源 + +Karpor 提供了一个强大的搜索功能,允许你快速跨集群查找资源。要使用此功能: + +1. 打开 Karpor Web 界面中的 搜索 页面。 +2. 
输入你要查找的资源的搜索条件。 + +以下是 `搜索` 页面的示例: + +![](/karpor/assets/search/search-auto-complete.png) +![](/karpor/assets/search/search-result.png) + +要了解更多关于搜索功能以及如何有效使用它们的说明,请查看 [搜索方法](../5-references/3-search-methods.md) 指南。 + +## 资源洞察 + +通过点击搜索结果,你可以进入到资源的**洞察**页面,在这里你可以查看资源风险报告、健康分、资源关系拓扑图等经过我们提炼的信息。 + +以下是 `洞察` 页面的示例: + +![](/karpor/assets/insight/insight-home.png) +![](/karpor/assets/insight/insight-single-issue.png) +![](/karpor/assets/insight/insight-topology.png) + +## 结论 + +请注意,本指南仅提供 Karpor 的快速入门,你可能需要参考其他文档和资源来深入地了解每个功能。 + +## 下一步 + +- 了解 Karpor 的 [架构](../concepts/architecture) 和 [术语表](../concepts/glossary)。 +- 查看 [用户指南](../user-guide/multi-cluster-management) 以了解 Karpor 的更多功能。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/assets/2-installation/install.gif b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/assets/2-installation/install.gif new file mode 100644 index 00000000..68889793 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/assets/2-installation/install.gif differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/assets/2-installation/open-in-browser.gif b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/assets/2-installation/open-in-browser.gif new file mode 100644 index 00000000..00adfb18 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/assets/2-installation/open-in-browser.gif differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/1-architecture.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/1-architecture.md new file mode 100644 index 00000000..018f5f3f --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/1-architecture.md @@ -0,0 +1,24 @@ +--- +title: 架构 +--- +![](assets/1-architecture/architecture.png) + +## 组件 + +- `Dashboard`:Karpor 的 Web UI 界面。 +- `Server`:Karpor 的核心后端服务。 +- `Syncer`:用于实时同步集群资源的独立服务。 +- `Storage`:用于存储已同步的资源和用户数据的存储后端。 + +## Karpor 的工作原理 + +1. 安装后,用户可以将感兴趣的集群注册到 Karpor 中。 +2. Syncer 组件会自动将已注册集群中的资源实时同步到 Storage 中,同时会确保资源的实时变化也会自动同步到 Storage 中。 +3. 当用户需要查找特定资源时,只需在 Dashboard 的搜索框中输入查询语句。Dashboard 会与 Server 的搜索接口交互,Server 内的搜索模块将解析这些语句,并在 Storage 中查找相应的资源,然后将搜索结果返回给 Dashboard。 +4. 点击搜索结果后,用户将被引导至资源洞察页面。Dashboard 调用 Server 的洞察接口,其中 Server 的洞察模块对资源进行静态扫描,生成问题报告,并定位其相关资源,以绘制包含所有父资源和子资源的资源拓扑图。 +5. 
洞察页面同样适用于资源组,比如洞察特定 Group-Version-Kind 的资源组、单个命名空间,或是用户自定义的资源组。 + +## 下一步 + +- 学习 Karpor 的 [术语表](../concepts/glossary)。 +- 查看 [用户指南](../user-guide/multi-cluster-management) 以了解更多关于你能够通过 Karpor 实现的内容。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/3-glossary.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/3-glossary.md new file mode 100644 index 00000000..b63a52bd --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/3-glossary.md @@ -0,0 +1,56 @@ +--- +title: 术语表 +--- +## 集群 + +等同于 `Kubernetes` 中的集群概念,例如名为 `democluster` 的集群。 + +`Karpor` 可以管理多个集群,包括集群注册、证书轮换、生成和查看洞察,以及通过 Dashboard 进行的其他操作。它还支持使用 `Karpor` 发放的统一证书,通过 `kubectl` 和 `kubectx` 等命令行工具访问任何被管理的集群。 + +更多细节,请参考最佳实践:[告别集群证书切换,让你“一卡通行”](../3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md)。 + +## Hub Cluster + +管理其他集群的集群。由于 Karpor 本身也是一个 Kubernetes Apiserver,在这个特殊的集群中我们注册了一些自定义资源用于管理集群元数据、资源回流策略等,这个特殊的集群我们叫它 Hub Cluster,和托管的用户集群区分开。 + +## Managed Cluster + +泛指被 Hub Cluster 管理的集群,一般是托管在 Karpor 中的用户集群。 + +## 资源 + +等同于 `Kubernetes` 中的资源概念,如名为 `mockDeployment` 的 `Deployment`。 + +`Karpor` 对其管理集群中的资源进行实时同步、搜索和洞察。资源是 `Karpor` 里搜索和洞察的最小粒度对象。 + +## 资源组 + +**资源组是一种逻辑上的组织结构**,用于将相关的 `Kubernetes` 资源组合起来,以便于更直观的查看、搜索和洞察。例如,可以创建一个名为 `mockapp` 的 `Application` 资源组,其中包括一个 `Namespace`、一个 `Deployment` 和多个具有特定标签(如 `app.kubernetes.io/name: mockapp`)的 `Pods`。 + +## 资源组规则 + +**资源组规则是一套规则**,将特定资源分组到适当的资源组中。这些规则旨在基于 `annotations`、`labels`、`namespace` 等属性,将资源组织成逻辑单元。例如,要定义一个应用程序资源组规则,可以指定 `annotations` 为 `app.kubernetes.io/name` 作为分组条件。 +`Karpor` 预设了一个资源组规则 `Namespace` 以及自定义资源组规则。 + +![](assets/3-glossary/image-20240326171327110.png) + +## 拓扑 + +在 `Karpor` 中,拓扑是指**给定资源组内相关资源之间的关系和依赖**。利用可视化的拓扑图可以更容易地查看和理解资源组的内部结构,这对于故障排查和定位问题很有帮助。 + +## 审计 + +审计是指**对给定资源组内的所有资源执行合规性扫描**。其目的是帮助用户发现潜在风险。当前系统内置使用的扫描工具和规则,但我们将来会支持自定义方式进行扫描。 + +## 问题 + +**审计的输出被称为问题**。如果被扫描对象没有问题,则审计结果将为空。否则,所有识别到的风险将根据其风险等级进行分类并显示,包括每个风险的描述、相关资源等,用来指导用户解决问题,确保集群资源的安全和合规。 + +## 健康分 + +评分用于反映资源组或资源的**整体健康状况**,提醒用户及时调整和采取措施。健康评分是基于资源组的审计结果计算得出。影响评分的因素包括:**风险等级**、**风险数量**和**资源总数**。 + +## 下一步 + +- 学习 Karpor 的 [架构](../concepts/architecture)。 +- 查看 [用户指南](../user-guide/multi-cluster-management),了解更多有关你可以通过 Karpor 实现的内容。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/assets/1-architecture/architecture.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/assets/1-architecture/architecture.png new file mode 100644 index 00000000..afec9346 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/assets/1-architecture/architecture.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/assets/3-glossary/image-20240326171327110.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/assets/3-glossary/image-20240326171327110.png new file mode 100644 index 00000000..f5673eb8 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/2-concepts/assets/3-glossary/image-20240326171327110.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/1-how-to-create-token.md 
b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/1-how-to-create-token.md new file mode 100644 index 00000000..bd72e5c5 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/1-how-to-create-token.md @@ -0,0 +1,82 @@ +--- +title: 如何创建 Token +--- +在这篇文档中,你将了解如何使用 token 访问 Karpor dashboard。 + +[Hub Cluster](../2-concepts/3-glossary.md#hub-cluster) 采用了与 Kubernetes 相同的基于角色的访问控制(RBAC)机制。这意味着,要访问 Hub Cluster,用户需要在 Hub Cluster 上创建 ClusterRole、ServiceAccount,以及相应的 ClusterRoleBinding 来将两者绑定。为了提升用户体验,我们预设了两种 ClusterRole:karpor-admin 和 karpor-guest。karpor-admin 角色拥有在面板上执行所有操作的权限,包括但不限于添加或删除集群、创建资源组等;而 karpor-guest 角色则仅限于在面板上进行查看操作。随着对 Karpor 的深入了解,用户可以根据自身需求,创建额外的 ClusterRole,实现更细致的权限管理。 + +## 导出 Hub Cluster 的 KubeConfig + +由于 Hub Cluster 需要 KubeConfig 进行验证,可以通过以下命令一键导出用于访问 Hub Cluster 的 KubeConfig。 +```shell +# 以下操作在安装 Karpor 的 Kubernetes 集群中运行 +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +**注意**:确保本地可访问 Hub Cluster kubeconfig 中的 server 地址。默认为集群内部地址 (https://karpor-server.karpor.svc:7443),本地无法直接连接。如在本地部署 Karpor,需将 karpor-server 服务转发至本地 7443 端口,并将 server 地址改为 `https://127.0.0.1:7443`。 + +你可以使用以下 sed 命令将 Hub 集群证书中的访问地址更改为本地地址: + +对于 MacOS/BSD 系统(需要在 `-i` 后添加 `''`): +```shell +sed -i '' 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +对于 Linux/GNU 系统(仅需要 `-i`): +```shell +sed -i 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +对于 Windows 系统: +请手动修改 kubeconfig 文件中的服务器地址。 + +## 将 Hub Cluster 的服务转发到本地 + +在本节中,我们假设你将 Karpor 部署在了本地集群(比如用 kind 或者 minikube 创建的集群)。 + +如上节所说,为了在本地访问 Hub Cluster,你需要将 karpor-server 的服务转发到本地。如果你使用了其他方法进行了转发,可以跳过这一步。这里使用简单的 port-forward 进行转发,打开另一个终端,运行: + +```shell +# 以下操作在安装 Karpor 的 Kubernetes 集群中运行 +kubectl -n karpor port-forward svc/karpor-server 7443:7443 +``` + +## 为你的用户创建 ServiceAccount 和 ClusterRoleBinding + +本节将指导你如何在 Hub Cluster 中创建 karpor-admin 和 karpor-guest 用户,并为它们分配相应的 ClusterRoleBinding。以下是具体的操作步骤: + +首先,指定 kubectl 连接的目标集群为 Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +然后,我们将创建两个常用的身份:管理员(karpor-admin)和访客(karpor-guest)。这个过程包括创建 ServiceAccount 并将其绑定到相应的 ClusterRole: + +```shell +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +kubectl create serviceaccount karpor-guest +kubectl create clusterrolebinding karpor-guest --clusterrole=karpor-guest --serviceaccount=default:karpor-guest +``` + +## 为你的用户创建 Token + +以下操作需在 Hub Cluster 中执行,请确保已正确设置 kubectl 连接到 Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +默认情况下,token 的有效期为 1 小时。如果你需要长期使用的 token,可以在生成时指定更长的过期时间。例如: +```shell +kubectl create token karpor-admin --duration=1000h +``` + +默认参数下, token 的最长有效期为 8760h(1 年)。如果你需要修改这个最长有效期,可以在 karpor-server 的启动参数中添加 `--service-account-max-token-expiration={MAX_EXPIRATION:h/m/s}`。 + +**注意**:创建 token 需要 v1.25.0 或更高版本的 kubectl 。 + +## 开始安全地使用 Karpor + +复制刚刚生成的 token,粘贴到 Karpor dashboard 的 token 输入框中, 点击登录。 + +在安全环境下开启你的 Karpor 之旅吧! 
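A minimal sketch, not part of the original how-to-create-token guide above, showing how a freshly minted token can be verified against the Hub Cluster before it is pasted into the dashboard. It reuses the kubeconfig path, ServiceAccount name, and kubectl version requirement from the steps above; adjust these assumptions if your setup differs.

```shell
# Hypothetical smoke test (illustrative only): confirm the hub cluster accepts the token.
export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig

# Mint a short-lived token for the admin ServiceAccount (kubectl v1.25+ is required, as noted above).
TOKEN=$(kubectl create token karpor-admin --duration=1h)

# An invalid or expired token is rejected with 401, so any authenticated read works as a check.
kubectl --token="$TOKEN" api-resources >/dev/null && echo "token accepted by the hub cluster"
```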
diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/2-multi-cluster-management.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/2-multi-cluster-management.md new file mode 100644 index 00000000..750379d9 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/2-multi-cluster-management.md @@ -0,0 +1,94 @@ +--- +title: 多集群管理 +--- +多集群管理是将集群注册进 Karpor 的入口,使能在大量集群间进行搜索和洞察。 + +## 注册集群 + +1. 点击 集群管理 标签页。 +2. 点击 注册集群 按钮。 + ![](/karpor/assets/cluster-mng/cluster-mng-empty.png) +3. 添加集群名字。集群名称必须唯一且一旦创建不能更改。 +4. 上传该集群的 KubeConfig 文件(一个具有读权限的文件就足够了)。 +5. 点击 验证并提交 按钮。 + ![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) +6. 一旦验证通过,集群将会被添加到 集群管理 页面。 + ![](/karpor/assets/cluster-mng/cluster-mng-register-success.png) + +**注意**:请确保上传的集群证书中的 server 地址(目标集群地址)与 Karpor 之间 的网络连通性。举例来说,如果你在本地集群中部署了 Karpor,并希望注册该本地集群,则需要将集群证书中的 server 地址修改为集群内部地址 `https://kubernetes.default.svc.cluster.local:443`,以确保 Karpor 能够直接访问目标集群。 + +### 注册 EKS 集群 + +如果你想注册 EKS 集群,那么需要对 KubeConfig 进行一些额外的操作: + +1. 导出 EKS 集群的 KubeConfig。例如,通过如下 aws 命令可以获得指定集群的 KubeConfig: + +```shell +aws eks --region update-kubeconfig --name --kubeconfig= +``` + +2. 在导出的 KubeConfig 文件中的 `users/exec` 中添加 `env`、`interactiveMode` 和 `provideClusterInfo` 字段。可以参考以下的 KubeConfig 结构: + +```yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: CA + server: SERVER + name: CLUSTER +contexts: +- context: + cluster: CLUSTER + user: USER + name: CONTEXT +current-context: CONTEXT +kind: Config +preferences: {} +users: +- name: USER + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + args: + - --region + - ap-southeast-1 + - eks + - get-token + - --cluster-name + - mycluster3 + - --output + - json + command: aws + ### 以下字段需要补充到 KubeConfig 中 + env: + - name: AWS_ACCESS_KEY_ID + value: + - name: AWS_SECRET_ACCESS_KEY + value: + - name: AWS_DEFAULT_REGION + value: + - name: AWS_DEFAULT_OUTPUT + value: json + interactiveMode: IfAvailable + provideClusterInfo: false +``` + +3. 在 [注册集群](#%E6%B3%A8%E5%86%8C%E9%9B%86%E7%BE%A4) 中使用修改后的 KubeConfig。 + +## 编辑集群 + +编辑 按钮允许修改 显示名称描述,从而改变仪表盘中集群名称和描述的显示方式。 + +![](/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png) + +## 轮换证书 + +当 KubeConfig 过期时,你可以通过点击 轮换证书 来更新证书。 +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png) + +## 移除集群 + +通过 删除 按钮方便地移除已注册的集群。 +![](/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/3-search.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/3-search.md new file mode 100644 index 00000000..cae5e22d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/3-search.md @@ -0,0 +1,47 @@ +--- +title: 如何搜索 +--- +在本节中,我们将探索如何使用 Karpor 执行多集群资源搜索,本指南完全通过 Dashboard 进行。 + +我们支持三种搜索方法: + +- **通过 SQL 搜索**:使用 SQL 查询语言执行资源搜索。 +- **通过 DSL 搜索**:通过 `Karpor` 的特定领域语言(DSL)进行资源搜索。 +- **通过自然语言搜索**:使用自然语言进行资源搜索。 + +## 通过 SQL 搜索 + +Karpor 提供了一个方便的 SQL 查询功能,允许你使用熟悉的 SQL 语法搜索和过滤所有托管集群中的 Kubernetes 资源,并为多集群资源搜索提供了针对性的优化和增强。 + +SQL 是软件工程行业从业者容易获取的技能之一,理论上使得学习曲线相当低。因此,这种搜索方法是为你准备的!特别适合 Karpor 的初学者。 + +以下是使用 SQL 搜索的步骤: + +1. **进入搜索页面**:我们将首页设计为搜索的入口点,因此打开 `Karpor` 的 Web UI 立即呈现给你搜索页面。 + ![](/karpor/assets/search/search-home.png) +2. 
**编写 SQL 查询语句**:使用 SQL 语法编写你的查询语句,指定你希望搜索的集群名称、资源类型、条件和过滤器。此外,如果你输入关键词并按空格,搜索框将弹出带有下拉菜单的自动完成提示,建议你可以输入的下一个可能的关键词。 + ![](/karpor/assets/search/search-auto-complete.png) +3. **执行查询**:点击 `搜索` 按钮执行查询,并被发送到搜索结果页面。Karpor 将返回与 SQL 查询匹配的资源列表。 + ![](/karpor/assets/search/search-result.png) +4. **高级功能**:利用我们的内置高级 SQL 语法,如排序、全文搜索等,进一步细化你的搜索。详情请参阅:[搜索方法文档](../5-references/3-search-methods.md)。 + +## 通过 DSL 搜索 + +敬请期待。🚧 + +## 通过自然语言搜索 + +虽然我们目前提供的 SQL 搜索功能不需要额外的学习,因为很多工程师已经具备 SQL 知识,但显然最直观、学习门槛最低的搜索方式是使用用户的母语——自然语言。 + +因此,我们在 Karpor 中集成了自然语言搜索 Kubernetes 资源的功能。 + +以下是使用自然语言搜索的步骤: + +1. **进入搜索页面**:我们将首页设计为搜索的入口点,因此打开 `Karpor` 的 Web UI 立即呈现给你搜索页面。然后我们可以选择自然语言搜索。 + ![](/karpor/assets/search/search-home-natural-language.png) +2. **编写自然语言查询语句**:使用自然语言编写你的查询语句,指定你希望搜索的集群名称、资源类型、条件和过滤器。 + ![](/karpor/assets/search/search-by-natural-language.png) +3. **执行查询**:点击 `搜索` 按钮执行查询,并被发送到搜索结果页面。Karpor 将返回与自然语言查询匹配的资源列表。 + ![](/karpor/assets/search/search-by-natural-language-result.png) +4. **搜索提示**:对于不完整或者随意输入的自然语言,我们会进行提示。 +5. **二次搜索**:我们对自然语言的搜索会转换为 SQL 语句,用户可以进行二次修改,重新搜索。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md new file mode 100644 index 00000000..3ac1df52 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md @@ -0,0 +1,27 @@ +--- +title: 检查任何资源组和资源 +--- +在这部分内容中,我们将通过清晰的步骤和实例详细解释如何使用 Karpor 来检查任何资源组或资源。 + +如果你不熟悉相关概念,可以参考 [术语表](../../2-concepts/3-glossary.md) 章节。 + +## 检查具体资源 + +1. 搜索你感兴趣的资源: + ![](/karpor/assets/search/search-home.png) +2. 在搜索结果页,所有通过条件筛选的资源将会被列出: + ![](/karpor/assets/search/search-result.png) +3. 
点击任意资源名称,即可跳转到该资源的洞察页面: + ![](/karpor/assets/insight/insight-home.png) + +## 检查具体资源组 + +你可能已经注意到,在每一个搜索结果条目中,资源的 `Cluster`、`Kind`、`Namespace` 等标签都列了出来。请注意,这些标签是**超链接**,我们称之为 "**锚点**"。它们代表了指向特定资源组或资源的链接。通过点击这些**锚点**,你可以快速跳转到该资源组或资源的洞察页面。 + +![](/karpor/assets/search/search-result.png) + +## 在资源组 / 资源间灵活切换 + +实际上,除了前述搜索结果中的标签外,在任何页面上看到的任何资源 / 资源组名称,都可以作为**锚点**重定向,就像是时空虫洞,允许你在任何维度之间来回穿梭,直到找到你正在搜索的资源。搜索和锚点都是加速检索的手段,它们是 Karpor 作为 Kubernetes 探索器的关键特性。 + +![](/karpor/assets/insight/insight-breadcrumbs.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/2-custom-resource-group.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/2-custom-resource-group.md new file mode 100644 index 00000000..e009d79d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/2-custom-resource-group.md @@ -0,0 +1,92 @@ +--- +title: 自定义资源组 +--- +## 创建自定义资源组 + +本节将重点介绍如何在 Karpor 中创建自定义资源组。通过自定义资源组,你可以根据自己的需求和逻辑概念,在 Karpor 中灵活管理和组织资源。我们将逐步指导你创建和定义自定义资源组,并展示如何使用这些组进行资源洞察和管理。 + +如果你不熟悉**资源组**和**资源组规则**相关概念,可以参考 [词汇表](../../2-concepts/3-glossary.md) 部分。 + +**假设**在你的组织或公司内,有一个 `应用单元` 的概念,代表**某个环境中应用的所有资源**。 + +我们在**标签中标记应用的名称和环境**。例如,以下是 `生产环境` 中 `mock-apple` 的 `应用单元`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: mock-apple + name: mock-apple +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + template: + metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + fruit: apple + spec: + containers: + - image: nginx:latest + name: mock-container + dnsPolicy: ClusterFirst + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + name: mock-service-apple-prod + namespace: mock-apple +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + type: ClusterIP +``` + +现在,我们将按照以下步骤创建一个名为 `应用单元` 的自定义 `资源组规则`。它将根据用户指定的规则对集群中的所有资源进行分类,并列出所有符合规则的 `资源组`。 + +1. 点击 洞察 标签进入洞察首页。 +2. 在页面底部,你将看到一个默认的资源组规则 `命名空间`,这是按命名空间分类的单一规则。 + ![](/karpor/assets/insight/insight-homepage.png) +3. 点击创建资源组按钮 +,并在弹出窗口中填入 `应用单元` 的**基本信息和分类规则**。 + ![](/karpor/assets/insight/insight-create-app-resource-group-rule.png) +4. 点击 提交 按钮,然后点击新出现的 应用单元 标签,列出所有应用单元。 + ![](/karpor/assets/insight/insight-list-app-resource-groups.png) +5. 你可以在搜索框中输入关键词,快速找到 `生产` 环境中的 `mock-apple` 应用单元。 + ![](/karpor/assets/insight/insight-search-app-resource-group.png) +6. 你可以点击资源组卡片上的 查看 按钮,跳转到相应的 `资源组洞察页面`,查看某个应用单元的所有资源、拓扑关系、合规报告等聚合信息。 +7. 
如有需要,你也可以使用相同的步骤创建 `环境资源组`。 + ![](/karpor/assets/insight/insight-create-env-resource-group-rule.png) + ![](/karpor/assets/insight/insight-list-env-resource-groups.png) + +## 编辑自定义资源组 + +你可以点击自定义资源组选项卡右侧的按钮 来修改弹出窗口中的基本信息和分类规则。 + +![](/karpor/assets/insight/insight-edit-env-resource-group.png) + +## 删除自定义资源组 + +你可以点击自定义资源组标签右侧的按钮 然后在弹出窗口中点击 删除,以删除当前资源组规则。 + +![](/karpor/assets/insight/insight-delete-env-resource-group.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/3-summary.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/3-summary.md new file mode 100644 index 00000000..639426ff --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/3-summary.md @@ -0,0 +1,23 @@ +--- +title: 概览 +--- +在本节中,我们将了解 Karpor 洞察页面上的 `概览卡片`,它们用于快速查看和理解当前资源组或资源的关键指标。 + +在不同的资源组下,`概览卡片` 显示的内容也可能有所不同。 + +如果你查看的是: + +1. **资源组洞察页面**: + + 1. **集群洞察页面**,概览卡片显示的是集群的**节点、Pod 数量、CPU、内存容量以及 Kubernetes 版本**。 + ![](/karpor/assets/insight/insight-summary-cluster.png) + 2. **资源种类洞察页面**,概览卡片显示的是**所属集群、GVK(Group Version Kind)信息,以及当前集群下该类型资源的数量**。 + ![](/karpor/assets/insight/insight-summary-kind.png) + 3. **命名空间洞察页面**,概览卡片显示的是**所属集群、命名空间,以及当前命名空间下最丰富的资源类型**。 + ![](/karpor/assets/insight/insight-summary-namespace.png) + 4. **自定义资源组洞察页面**,概览卡片显示的是**每个规则的关键值,以及当前资源组下的几个资源统计数据**。 + ![](/karpor/assets/insight/insight-summary-custom-resource-group.png) +2. **资源洞察页面**,概览卡片显示的是**当前资源的名称、GVK 信息、所属集群和命名空间**。 + ![](/karpor/assets/insight/insight-summary-resource.png) + +⚠️ **注意**:无论你处于哪个资源组洞察页面,概览卡片总会显示一个健康评分,该评分基于实体的风险合规状态计算得出。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/4-compliance-report.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/4-compliance-report.md new file mode 100644 index 00000000..68b60d87 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/4-compliance-report.md @@ -0,0 +1,16 @@ +--- +title: 合规报告 +--- +本节将介绍合规扫描功能,主要用于检测和评估当前资源或资源组中的所有资源是否符合特定的合规标准和安全政策。在本节中,你将了解如何有效利用合规扫描功能以确保集群和资源的安全与合规。 + +如果你不熟悉**合规报告**或**风险**相关概念,你可以参考 [术语表](../../2-concepts/3-glossary.md) 章节。 + +1. 按照 [检查任何资源组和资源](#%E6%A3%80%E6%9F%A5%E4%BB%BB%E4%BD%95%E8%B5%84%E6%BA%90%E7%BB%84%E5%92%8C%E8%B5%84%E6%BA%90) 的指引,导航至特定资源组 / 资源的洞察页面。 +2. 你可以看到资源的**合规报告**卡片。 + ![](/karpor/assets/insight/insight-home.png) +3. 该卡片显示了对当前资源或资源组下所有资源进行扫描时识别出的**风险**,按风险等级分类。在每个风险等级标签下,风险按发生频率从高到低排序。每个风险条目显示标题、描述、发生次数以及发现问题的扫描工具。 +4. 点击特定风险将显示一个弹出窗口,展示风险的详细信息。 + ![](/karpor/assets/insight/insight-single-issue.png) +5. 点击 查看所有风险 ,将弹出一个抽屉,列出所有风险。这里,你可以搜索、分类、分页等。 + ![](/karpor/assets/insight/insight-all-issues.png) +6. 一旦你按照其指示解决了一个风险,可以点击 重新扫描 按钮,这将触发对资源组下所有资源进行全面的合规扫描。一旦扫描完成,仪表板将显示新的结果。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/5-topology.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/5-topology.md new file mode 100644 index 00000000..1bbaedd5 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/5-topology.md @@ -0,0 +1,19 @@ +--- +title: 拓扑结构 +--- +## 拓扑结构 + +在本节中,我们将探索 Karpor 中的拓扑功能。拓扑视图将帮助你更直观地理解集群中各种资源之间的关系和依赖。以下是如何使用拓扑视图。 + +1. 按照 [检查任意资源组和资源](#%E6%A3%80%E6%9F%A5%E4%BB%BB%E4%BD%95%E8%B5%84%E6%BA%90%E7%BB%84%E5%92%8C%E8%B5%84%E6%BA%90) 的指引,导航至特定资源组 / 资源的洞察页面。 +2. 在页面底部,你可以看到资源拓扑图。 + ![](/karpor/assets/insight/insight-topology.png) +3. 根据当前页面情况: + 1. 资源洞察页面: + 1. 
该图将展示与当前资源相关的上游和下游资源。例如,如果当前资源是一个 Deployment(部署),拓扑图将展示 Deployment 下的 ReplicaSet(副本集)以及 ReplicaSet 下的 Pods(容器组)。 + ![](/karpor/assets/insight/insight-topology-example.png) + 2. 点击资源拓扑图中的一个节点,等同于点击特定资源的锚点,这将直接导航至该资源的洞察页面。 + 2. 资源组洞察页面: + 1. 该图将直观显示当前资源组下各种资源类型的数量与关系。 + 2. 点击资源拓扑图中的一个节点,等同于点击资源类型,下方列表将刷新显示当前资源组中特定类型下的所有资源。 + ![](/karpor/assets/insight/insight-linkage.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/_category_.json new file mode 100644 index 00000000..c39e5397 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "How to Insight" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/index.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/index.md new file mode 100644 index 00000000..9cec8507 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/4-insight/index.md @@ -0,0 +1,6 @@ +--- +title: 如何洞察 +--- +在本节中,我们将介绍如何使用 Karpor 对集群内的资源进行全面洞察。你可以通过多种方式访问洞察页面,并且可以轻松地在不同范围(如集群、类型、命名空间或单个资源)的洞察页面之间切换。如果你当前组织内有特定领域的逻辑范围,你甚至可以通过设置资源组规则来自定义资源组(如应用程序、环境等)。我们还提供功能以便对这些自定义资源组进行洞察。 + +本指南将完全在 Karpor 仪表板上操作。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md new file mode 100644 index 00000000..44f92637 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md @@ -0,0 +1,42 @@ +--- +title: 告别集群证书切换,让你“一卡通行” +--- +## 挑战与需求 + +### 大规模多集群的规模挑战 + +2014 年 6 月,Kubernetes 从 Google 的内部 Borg 项目诞生,引人注目地亮相。在科技巨头的支持和一个蓬勃发展的开源社区的帮助下,它逐渐成为了容器编排领域的事实标准。随着公司开始在生产环境中部署 Kubernetes,单个 Kubernetes 集群无法再满足内部日益复杂的需求。单个集群中的节点数量超过社区推荐的限制(5,000)是很常见的,使得扩展到多集群成为一个自然的选择。 + +### 多集群访问者的基本需求 + +随着多集群的蓬勃发展,各种平台可能需要跨不同集群访问资源,需要获取每个集群的 KubeConfig。 + +随着用户和集群数量的增加,集群管理员面临着巨大的时间成本:如果有 `M` 个集群和 `N` 个用户,管理 KubeConfig 的时间复杂度将变为 `O (M*N)`。此外,用户在访问不同集群时需要切换不同的 KubeConfig,不同集群的 KubeConfig 对应的权限也各不相同,无疑增加了使用的复杂度。 + +![直接连接:用户需要维护多个 KubeConfigs](assets/1-one-pass-with-proxy/image-20240326163622363.png) + +在这种情况下,有没有一种方法能方便地访问不同集群中的资源,而无需维护大量的 KubeConfig 和管理跨集群的各种用户权限问题?此外,这种方法理想地应该是云原生的,可以通过 kubectl 和 Kubernetes 官方客户端访问,以降低过渡到使用这种方法的成本。`Karpor` 的出现就是为了解决这些问题。 + +## "一站式通行" 的理念 + +我们开发了 `Karpor`,一个开源项目。作为一个 Kubernetes 资源探索器,在搜索和洞察集群资源方面具有独特优势,它的基础多集群管理组件,具备集群证书颁发和多集群请求代理的特点,使其高度适合作为平台对多个集群的统一访问入口。该组件支持以云原生方式转发用户请求到指定集群,允许用户维护一套 KubeConfig 来访问不同的集群,使访问多集群像访问单个集群一样简单。那么,它是如何工作的呢?下面,我们介绍 `Karpor` 的架构和功能。 + +![使用多集群网关:用户只需要维护一套 KubeConfigs](assets/1-one-pass-with-proxy/image-20240326164141400.png) + +### 多集群请求路由和代理 + +`Karpor` 包含一个应用层网关,能够将任何 Kubernetes 风格的请求转发给指定的 Kubernetes 集群。`Karpor` 也是基于 Kubernetes 框架开发的,作为 kube-apiserver,可以独立运行或作为现有 kube-apiserver 的扩展。`Karpor` 支持处理两种类型的扩展资源:`Cluster` 和 `Cluster/Proxy`,前者用于存储集群信息,后者用于将用户请求转发到特定集群。用户可以通过 Kubernetes 官方 CLI(`kubectl`)或 SDK(`client-go`、`client-java` 等)进行访问。 + +`Karpor` 将所有对 `Cluster/Proxy` 子资源的访问代理到目标集群。例如,要从 `Cluster1` 集群检索 Pod 信息,用户需要向 `Karpor` 发送 `GET /apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods` 请求。`Karpor` 将从 `Cluster/Cluster1` 资源生成一个 KubeConfig 以访问该集群,并将 `/api/v1/pods` 请求代理到 `Cluster1` 集群。 + +![使用 kubectl 和 karpor 
证书访问任何管理的集群](assets/1-one-pass-with-proxy/image-20240326165247891.png) + +### 支持所有 Kubernetes 原生请求 + +`Karpor` 支持转发所有 kube-apiserver 请求。具体来说,`Karpor` 是一个应用层网关,通过 HTTP connect 协议代理 HTTP 请求。除了支持对资源的 `get`、`create`、`update` 和 `delete` 操作外,它还支持 `watch`、`log`、`exec`、`attach` 等。(由于用于 `exec` 和 `attach` 的 SPDY 协议不支持 http2,`Karpor` 在转发这些请求时会禁用 http2,切换到 http1.1 并支持 hijacker 处理)。 + +![](assets/1-one-pass-with-proxy/image-20240326165632158.png) + +## 总结 + +从上文中可以看出,利用 `Karpor` 的多集群管理组件,为用户提供了一个可控权限的 “多集群通行证”。用户不再需要关心频繁切换集群证书和新集群的接入等问题。有了这个“一证通行”,访问多个集群的成本降低了,满足了大多数用户在多集群平台上的最基本需求。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/_category_.json new file mode 100644 index 00000000..82dd90e3 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Best Production Practices" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png new file mode 100644 index 00000000..ab8051fe Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png new file mode 100644 index 00000000..de950079 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png new file mode 100644 index 00000000..27fffb47 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png new file mode 100644 index 00000000..99053c68 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/_category_.json new file mode 100644 index 00000000..8f01ba26 --- /dev/null +++ 
b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/3-user-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/1-non-code-contribute.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/1-non-code-contribute.md new file mode 100644 index 00000000..0c723628 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/1-non-code-contribute.md @@ -0,0 +1,40 @@ +--- +title: 非代码贡献指南 +--- +你可以用以下任何感兴趣的方式贡献。 + +## 贡献用户示例和 Demo + +* 如果你正在使用 Karpor,最简单的贡献方式就是 [向社区表达感谢](https://github.com/KusionStack/karpor/issues/343)。 + +## 报告漏洞 + +在提交新的 issue 之前,请确保该问题没有被提交过。 + +检查 [Issue 列表](https://github.com/KusionStack/karpor/issues) 是否有类似 issue。 + +通过 [报告漏洞](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug-report.yaml) 提交漏洞报告,确保你提供了足够的信息帮助复现该漏洞。 + +遵循下面的 issue 模板并且添加额外信息来帮助我们复现该问题。 + +## 安全性 issue + +如果你确信发现了安全漏洞,请阅读我们的 [安全策略](https://github.com/KusionStack/karpor/blob/main/SECURITY.md) 获取更多细节。 + +## 提议增强特性 + +如果你有提升 Karpor 的好点子,请提交 [特性请求](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Ffeature&projects=&template=enhancement.yaml)。 + +## 回答问题 + +如果你有疑问并且在 [文档](https://www.kusionstack.io/karpor/) 中找不到答案,下一步是在 [GitHub 论坛](https://github.com/KusionStack/karpor/discussions) 中提问。 + +帮助这些用户对我们很重要,我们很需要你的帮助。你可以通过回答 [他们的问题](https://github.com/KusionStack/karpor/discussions) 来帮助其他的 Karpor 用户。 + +## 贡献文档 + +贡献文档需要一些提交 pull request 到 Github 的知识,按照下面的指南这将会非常简单。 + +* [kusionstack.io 开发者指南](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +查看 [开源指南](https://opensource.guide/how-to-contribute/) 获取更多贡献方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/2-code-contribute.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/2-code-contribute.md new file mode 100644 index 00000000..1e54bffa --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/2-code-contribute.md @@ -0,0 +1,174 @@ +--- +title: 代码贡献指南 +--- +在本代码贡献指南,你将会了解下列内容: + +- [如何在本地运行 Karpor](#%E5%A6%82%E4%BD%95%E5%9C%A8%E6%9C%AC%E5%9C%B0%E8%BF%90%E8%A1%8C-karpor) +- [如何创建拉取请求(pull request)](#%E5%88%9B%E5%BB%BA%E6%8B%89%E5%8F%96%E8%AF%B7%E6%B1%82pull-request) +- [代码审查指导规则](#%E4%BB%A3%E7%A0%81%E5%AE%A1%E6%9F%A5) +- [Pull request 格式指南](#pull-request-%E6%A0%BC%E5%BC%8F%E6%8C%87%E5%8D%97) +- [更新文档和网站](#%E6%9B%B4%E6%96%B0%E6%96%87%E6%A1%A3%E5%92%8C%E7%BD%91%E7%AB%99) + +## 如何在本地运行 Karpor + +本指南将会帮助你开始 Karpor 开发。 + +### 前提条件 + +* Golang 版本 1.22+ + +
+ 安装 Golang + +1. 从 [官方网站](https://go.dev/dl/) 安装 golang 1.22+。解压二进制文件并放置到某个位置,假设该位置是 home 目录下的 `~/go/`,下面是一个示例命令,你应当选择适合你系统的正确二进制文件。 + +``` +wget https://go.dev/dl/go1.22.5.linux-amd64.tar.gz +tar xzf go1.22.5.linux-amd64.tar.gz +``` + +如果你想在本地开发环境维护多个 golang 版本,你可以下载包并解压到某个位置,比如 `~/go/go1.22.1`,然后根据下面的命令相应地改变路径。 + +1. 为 Golang 设置环境变量 + +``` +export PATH=~/go/bin/:$PATH +export GOROOT=~/go/ +export GOPATH=~/gopath/ +``` + +如果 `gopath` 目录不存在,可以使用 `mkdir ~/gopath` 创建。这些命令将会把 go 二进制文件所在的目录添加到 `PATH` 环境变量 (让其成为 go 命令的优先选择)并且设置 `GOROOT` 环境到该 go 目录。如果将这些行添加到你的 `~/.bashrc` or `~/.zshrc` 文件,你就不用每次打开新的终端时设置这些环境变量。 + +1. (可选) 在一些地区,例如中国大陆,连接到默认的 go 仓库可能会很慢;你可以配置 GOPROXY 来加速下载过程。 + +``` +go env -w GOPROXY=https://goproxy.cn,direct +``` + +
+ +* Kubernetes 版本 v1.20+ ,且配置文件保存在 `~/.kube/config`。 +* golangci-lint 版本 v1.52.2+, 通过运行 `make lint` 可以自动安装,如果自动安装失败,你可以手动安装。 + +
+ 手动安装 golangci-lint + +你可以根据 [安装指南](https://golangci-lint.run/welcome/install)手动安装,或者使用以下命令: + +``` +cd ~/go/ && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.52.2 +``` + +
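A quick pre-flight check, offered here as an illustrative sketch rather than part of the original guide, that the prerequisites listed above are in place before running the build targets:

```shell
# Illustrative environment check against the minimum versions named in the prerequisites.
go version                       # expect go1.22 or newer
kubectl config current-context   # confirms ~/.kube/config points at a usable context
kubectl version                  # the cluster reported here should be v1.20 or newer
golangci-lint version            # expect v1.52.2 or newer; `make lint` can also install it
```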
+ +### 构建 + +- 克隆项目 + +```shell +git clone git@github.com:KusionStack/karpor.git +``` + +- 本地构建 + +执行 `make build-all` 将会为所有平台创建可执行文件;如果你只想为特定平台构建,执行 `make build-${PlatformName}`,例如 `make build-darwin`。查看所有可用命令,执行 `make help`。 + +### 测试 + +为了保证代码质量,编写测试代码是必不可少的,你可以在项目根目录运行以下命令执行单元测试: + +```shell +make test +``` + +如果你需要生成额外的覆盖率报告,执行: + +```shell +make cover +``` + +接下来你可以执行下列命令,来从浏览器中阅读测试覆盖率报告: + +```shell +make cover-html +``` + +## 创建拉取请求(Pull Request) + +我们很高兴你考虑为 Karpor 项目作出贡献! + +本文档将会指导你完成 [创建拉取请求](./index.md#contribute-a-pull-request) 的过程。 + +### 在你开始之前 + +我们知道你对于创建第一个 pull request 非常兴奋。在我们开始之前,请确保你的代码符合相关的 [代码规约](../2-conventions/2-code-conventions.md)。 + +### 你的第一个 Pull Request + +在提交你的 PR 之前,运行下面的命令并确保它们都成功了: + +``` +make test +make lint +``` + +如果这是你第一次向 Github 上的开源项目贡献,请确保你已经阅读了 [创建拉取请求](https://docs.github.com/zh/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request)。 + +为了提高你的 pull request 被接受的机会,请确保你的 pull rquest 符合以下指导规则: + +- 标题和描述与实现相符。 +- pull request 中的所有 commit 都符合 [格式指南](#Formatting-guidelines)。 +- pull request 会关闭一个相关 issue。 +- pull request 包含了必要的测试,以验证预期行为。 +- 如果你的 pull request 有冲突,请将你的分支 rebase 到 main 分支。 + +如果 pull request 修复了一个漏洞: + +- pull request 的描述中必须包含 `Closes #` 或 `Fixes #`。 +- 为了避免回归问题,pull request 必须包含验证该漏洞被修复的测试。 + +## 代码审查 + +一旦你创建了一个 pull requset,下一步就是让其他人审查你的改动。代码审查对审查者和 pull request 作者都是很好的学习机会。 + +如果你觉得某个特定的人应当审查你的 pull request,你可以在描述或评论中标记他们。 +通过输入 `@` 符号和其用户名来标记用户。 + +我们建议你阅读 [如何进行代码审查](https://google.github.io/eng-practices/review/reviewer/) 来了解更多关于代码审查的知识。 + +## Pull request 格式指南 + +精心编写的 pull request 可以最大程度地缩短你的更改被接受的时间。这些指南将帮助你为 pull requset 编写条理清晰的提交消息和说明。 + +### Commit 信息格式 + +了解更多:[Commit 规约](../2-conventions/4-commit-conventions.md) + +### Pull Request 标题 + +在接受 pull request 时,Karpor 团队会将所有的 commit 合并为一个。 + +Pull request 的标题将会成为合并后的 commit 信息的描述。 + +我们仍然鼓励贡献者撰写详细的 commit 信息,因为它们将会作为 git commit 正文的一部分。 + +我们在生成发布更新日志时将会使用 pull request 的标题。因此,我们会努力使标题尽可能具有信息量。 + +确保你的 pull request 标题使用与 commit 信息相同的格式。如果不遵循该格式,我们将会在该 pull request 添加 `title-needs-formatting` 标签。 + +### 通过所有 CI 检查 + +在合并之前,所有的测试 CI 都应该通过: + +- 覆盖率不应该下降。当前,pull request 的覆盖率应当至少为 60%。 +- Karpor 使用 **CLA** 作为贡献者协议。它要求你在第一次合并 pull request 之前签署。 + +## 更新文档和网站 + +如果你的 pull request 被合并了,而且它引入了新的特性或增强,你需要更新文档并且提交 pull requset 到 [kusionstack.io](https://github.com/KusionStack/kusionstack.io) 仓库。 + +根据下面的文档了解如何编写文档: + +- [kusionstack.io 开发者指南](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +太棒了,你已经完成了代码贡献的整个生命周期! diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/3-roles.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/3-roles.md new file mode 100644 index 00000000..206d98ea --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/3-roles.md @@ -0,0 +1,36 @@ +--- +title: 角色 +--- +感谢您对本开源项目的关注和支持!本文档将阐述贡献者在项目中的角色、职责以及如何从 Contributor 升级为 Maintainer,以及 Maintainer 降级为 Contributor 的规则。我们希望通过这份文档,让每位贡献者都能清楚地了解自己的成长路径,并为项目的发展做出更大的贡献。 + +## 贡献者角色及职责 + +在本开源项目中,我们主要设有两个贡献者角色:Contributor 和 Maintainer。 +以下是对这两个角色的简要介绍: + +1. Contributor:项目的贡献者,可以是代码贡献者、文档贡献者、测试贡献者等。Contributor 为项目提供了宝贵的资源,帮助项目不断完善和发展。 +2. Maintainer:项目的维护者,负责项目的日常维护工作,包括审查和合并 PR、处理 Issue、发布版本等。Maintainer 是项目的核心成员,对项目的发展方向和决策具有重要的影响力。 + +## Contributor 升级为 Maintainer + +我们非常欢迎每位 Contributor 为项目的发展做出贡献,并鼓励 Contributor 向 Maintainer 的角色发展。 +以下是从 Contributor 升级为 Maintainer 的条件: + +1. 
持续贡献:Contributor 需要在一段时间内(例如 3 个月)持续为项目贡献代码、文档或其他资源。这表明 Contributor 对项目的关注度和热情。 +2. 质量保证:Contributor 提交的代码或文档等资源需要保持较高的质量,符合项目的规范要求,并对项目产生积极的影响。 +3. 积极参与:Contributor 需要积极参与到项目的讨论和决策中来,为项目的发展提供建设性的意见和建议。 +4. 团队协作:Contributor 需要具备良好的团队协作精神,能够与其他贡献者和 Maintainer 友好沟通,共同解决问题。 +5. 责任担当:Contributor 需要具备一定的责任心,愿意承担项目维护的部分工作,包括审查 PR、处理 Issue 等。 + +当 Contributor 满足以上条件时,现有的 Maintainer 将会对其进行评估,如果达到 Maintainer 的要求,将会邀请其成为新的 Maintainer。 + +## Maintainer 降级为 Contributor + +Maintainer 在项目中承担了重要的职责,我们希望每位 Maintainer 都能够保持对项目的关注和热情。 +然而,我们也理解每个人的时间和精力是有限的,因此,当 Maintainer 无法继续履行职责时,将会降级为 Contributor: + +1. 长时间不活跃:如果 Maintainer 在一段时间内(例如 3 个月)没有参与项目的维护工作,包括审查 PR、处理 Issue 等,将被视为不活跃。 +2. 质量问题:如果 Maintainer 在项目中的工作出现严重的质量问题,导致项目的发展受到影响,将被视为不符合 Maintainer 的要求。 +3. 团队协作问题:如果 Maintainer 在与其他贡献者和 Maintainer 的协作过程中出现严重的沟通问题或团队协作问题,如不尊重他人意见、频繁产生冲突、拒绝协作等,影响到项目的正常运作和氛围,将被视为不符合 Maintainer 的要求。 +4. 违反规定:如果 Maintainer 违反了项目的规定或行为准则,包括但不限于泄露敏感信息、滥用权限等,将被视为不符合 Maintainer 的要求。 +5. 主动申请:如果 Maintainer 由于个人原因无法继续履行职责,可以主动申请降级为 Contributor。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/_category_.json new file mode 100644 index 00000000..09eab23b --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Contribution Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/index.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/index.md new file mode 100644 index 00000000..57c4cca4 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/1-contribution-guide/index.md @@ -0,0 +1,117 @@ +# 贡献指南 + +贡献指南介绍了如何参与社区发展和向社区贡献。 + +为了帮助我们为所有人建立安全和积极的社区体验,我们要求所有的参与者遵守 CNCF 社区 [行为准则](https://github.com/cncf/foundation/blob/main/code-of-conduct-languages/zh.md)。 + +## 开始贡献之前 + +### 找到一个贡献点 + +有多种方式对 Karpor 贡献,包括代码和非代码贡献,我们对任何人对社区的任何方式的努力都非常感谢。 + +这里是一些示例: + +* 贡献代码仓库和文档。 +* 报告和分类 issue。 +* 在你的地区组织会议和用户群组。 +* 回答 Karpor 相关问题帮助别人。 + +并且: + +- 如果你不知道如何开始,我们准备了一份 [新手任务清单 | Community tasks 🎖︎](https://github.com/KusionStack/karpor/issues/463),或者你可以通过 issue 跟踪器过滤 [help wanted | 需要帮助](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) 或 [good first issue | 新手任务](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue++label%3A%22good+first+issue%22) 标签. 
你可以从任何感兴趣的 issue 开始。 +- 如果你有任何问题,欢迎 [提交 Issue](https://github.com/KusionStack/karpor/issues/new/choose) 或者 [发帖讨论](https://github.com/KusionStack/karpor/discussions/new/choose),我们会尽快回答。 + +### 如何进行非代码贡献 + +我们认为对社区存续和未来发展而言,非代码贡献和代码贡献同样重要。 + +- 参考 [非代码贡献指南](./non-code-contribute) 获取更多细节 + +### 如何进行代码贡献 + +不确定从哪里开始向 Karpor 代码库贡献?可以从浏览带有 `good first issue` 或 `help wanted` 标签的 issue 开始。 + +- [Good first issue | 新手任务](https://github.com/KusionStack/karpor/labels/good%20first%20issue) 通常很容易解决的任务。 +- [Help wantet | 需要帮助](https://github.com/KusionStack/karpor/labels/help%20wanted) 和复杂程度无关, 我们希望能够在社区解决的问题。 +- 参考 [代码贡献指南](./code-contribute) 获取更多细节。 + +学习 [代码规约](../conventions/code-conventions) 和 [测试规约](../conventions/test-conventions),并了解在写代码时要注意的地方。 + +然后阅读 [发布流程与节奏指南](../conventions/release-process),了解你的代码什么时候会发布。 + +## 贡献一个拉取请求(Pull Request) + +在打开或者认领 issue 之后,你可以通过提交一个拉取请求(Pull Request)为 karpor 进行代码或非代码贡献。这里是你应该遵循的一些步骤: + +### Fork 仓库 + +Karpor 遵循主干开发模式,也就是说,用于发布的代码维护在 main 分支。 + +那么,为了开发 Karpor,你需要从 [karpor](https://github.com/KusionStack/karpor) Fork 一个项目到你自己的工作空间,然后检出一个新的分支用于开发代码。 + +### 开发代码和非代码 + +现在你可以开始解决 issue 。为了维护 Karpor 的代码质量,提交 PR 之后,一些必要的检查会被触发。 + +开发结束之后,你需要 commit 代码然后将代码 push 到你 fork 出的仓库。由于 PR 的标题将作为 commit message,你的 PR 标题需要符合 [commit 规约](../2-conventions/4-commit-conventions.md)。 + +以下是一些简单的解释: + +PR 的标题需要按照以下结构组织: + +``` +<类型>[可选 范围]: <描述> + +[可选 正文] +``` + +要求中的类型可以帮助更好地确认这次提交的范围,基于 [Angular 指南](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines)。 + +我们使用小写的 `<类型>`,以避免在大小写敏感的问题上浪费时间。`<类型>` 可以是以下之一: + +``` +feat: 新特性 +fix: 漏洞修复 +docs: 仅文档改动 +build: 关于构建系统和外部依赖的改动 +style: 不影响代码含义的改动(如空行、格式、缺少分号等) +refactor: 不属于漏洞修复或者增加特性的代码改动 +perf: 提升性能的代码改动 +test: 增加缺少的测试用例或者修正现有的测试用例 +chore: 构建过程或辅助工具和库(如文档生成)的修改 +``` + +### 打开一个拉取请求(Pull Request) + +[打开一个拉取请求(Pull Request)](https://github.com/KusionStack/karpor/pulls):打开一个从你 fork 的仓库的开发分支到 karpor main 分支的拉取请求(Pull Request)。你需要清楚地描述你的 PR 做了什么,并且链接到一个 issue。除此之外,PR 的标题应该按照前面提到的 commit 规约,并且长度在 5-256 个字符之间,不允许使用 `WIP` 和 `[WIP]` 前缀。 + +### 签署贡献者许可协议(Contributor License Agreement,CLA) + +如果这是你的第一个 PR ,你需要签署我们的 [CLA(贡献者许可协议)](https://github.com/KusionStack/.github/blob/main/CLA.md)。 你唯一需要做的事情的是在当前 PR 按以下格式发表评论: + +`I have read the CLA Document and I hereby sign the CLA` + +如果你的 CLA 签署失败了,可能有以下原因: + +* 评论的格式必须与上面完全一致,例如不能有额外的空格、空行等。 +* git commit 的作者和 Karpor PR 的作者必须一致。 + +### PR 检查 + +为了维持 karpor 项目的可靠性,以下检查将会自动触发: + +* 单元测试 +* Golang 代码风格检查 +* Commit 风格检查 +* PR 标题检查 +* 代码许可证检查 +* Markdown 格式检查 + +请确保你的 PR 通过这些检查。 + +## 成为社区成员 + +如果你对成为社区成员感兴趣或者想了解更多关于治理的内容,请查看 [角色](./3-roles.md) 获取更多细节。 + +在 Karpor 的世界中享受编码和协作吧! 
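附:下面用一段示意性的 shell 草图串联上文描述的 Fork、开发、本地检查、按 commit 规约提交并发起 Pull Request 的流程。其中的用户名 your-name、分支名 feat/my-feature 以及提交信息都只是演示用的假设,并非固定要求:

```shell
# 假设你已在 GitHub 上 Fork 了 KusionStack/karpor,且用户名为 your-name(仅作演示)
git clone git@github.com:your-name/karpor.git
cd karpor

# 基于 main 检出一个新的开发分支(分支名仅为示例)
git checkout -b feat/my-feature

# ……完成开发后,在提交前运行本地检查
make test
make lint

# 提交信息遵循约定式提交格式:<类型>[可选 范围]: <描述>
git commit -am "feat(search): support fuzzy matching in resource search"

# 推送到你 Fork 的仓库,然后在 GitHub 上向 KusionStack/karpor 的 main 分支发起 Pull Request
git push origin feat/my-feature
```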
diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/1-release-process.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/1-release-process.md new file mode 100644 index 00000000..0454cc00 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/1-release-process.md @@ -0,0 +1,49 @@ +--- +title: 发布流程和节奏 +--- +## 发布计划 + +我们将通过 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones) 建立并持续根据发布计划。每个发布里程碑将包括两类任务: + +- Maintainer 承诺完成的任务。Maintainer 会在根据他们的时间和精力承诺下次发布要实现的特性。通常,这些任务会经过离线讨论并添加到里程碑。这些任务会被分配给计划实施和测试它们的 Maintainer。 +- Contributor 提出的额外事项,通常是不紧急的特性和优化。Maintainer 不承诺在当前 release 周期内完成,但承诺会对这些社区提交进行代码审查。 + +里程碑会清晰地描述最终要的特性和期望完成日期。这将清楚地告知终端用户下一版本的发布时间和内容。 + +除了下一次里程碑之外,我们也会维护未来几个发布里程碑的草稿。 + +## 发布标准 + +- 所有的 **官方发布** 都应该在 `main` 分支添加标签,并且携带类似 `alpha`、 `beta`、 `rc` 的可选先行版本后缀,例如,一个通常的官方发布版本可能是 `v1.2.3`、 `v1.2.3-alpha.0`。例如,如果我们想要在发布正式版本 `v1.2.3` 之前进行一些验证,我们可以先发布类似 `v1.2.3-alpha.0` 的先行版本,在验证完成之后再发布 `v1.2.3` 版本。 +- Maintainer 承诺完成特定的特性和增强,由 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones) 跟踪。 +- 我们会尽可能防止发布延期;如果一个特性无法按时完成,它将会被挪到下次发布。 +- **每月** 发布一个新版本。 + +## 发布标准流程 + +Maintainer 负责推动发布过程并遵循标准操作程序以确保发布的质量。 + +1. 为指定发布的 git commit 添加标签并推到上游;该标签需要满足[语义化版本控制](#%E8%AF%AD%E4%B9%89%E5%8C%96%E7%89%88%E6%9C%AC%E6%8E%A7%E5%88%B6)。 +2. 确保触发的 Github Action 流水线执行成功。一旦成功,将会自动触发一次 Github 发布,其中包括根据提交信息计算出的 Changelog,以及镜像和 tar.gz 文件等制品。 +3. 根据 **Github 发布** 编写清晰的发布说明,包括: + - 用户友好的发布亮点。 + - 已弃用和不兼容的更改。 + - 有关如何安装和升级的简要说明。 + +## 门控测试 + +在创建发布分支之前:我们会有一个 **一周** 的代码冻结期。在这期间,我们将避免合并任何功能 PR,只会修复错误。 + +Maintainer 会负责测试并修复那些在临近发布时间发现的紧急问题。 + +## 语义化版本控制 + +`Karpor` 采用 [语义化版本控制](https://semver.org/lang/zh-CN/) 作为版本号。 + +版本格式为:主版本号.次版本号.修订号,例如, `v1.2.3`。版本号 **递增规则** 如下: + +- 主版本号:当你做了不兼容的 API 修改。 +- 次版本号:当你做了向下兼容的功能性新增。 +- 修订号:当你做了向下兼容的问题修正。 + +**先行版本号及版本编译信息** 可以作为加到“主版本号.次版本号.修订号”的后面,作为延伸,比如 `v1.2.3-alpha.0`, `v1.2.3-beta.1`, `v1.2.3-rc.2`, 其中 `-alpha.0`, `-beta.1`, `-rc.2` 是先行版本号。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/2-code-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/2-code-conventions.md new file mode 100644 index 00000000..e30bab57 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/2-code-conventions.md @@ -0,0 +1,79 @@ +--- +title: 代码规约 +--- +在这部分,你将会了解 Karpor 项目中所有类型的代码规约。不必一次把这些规则全部了解,确保你在编写代码前阅读对应的部分就可以了。 + +- [Go 代码规约](#go-%E4%BB%A3%E7%A0%81%E8%A7%84%E7%BA%A6) +- [Bash 或脚本规约](#bash-%E6%88%96%E8%84%9A%E6%9C%AC%E8%A7%84%E7%BA%A6) +- [目录和文件规约](#%E7%9B%AE%E5%BD%95%E5%92%8C%E6%96%87%E4%BB%B6%E8%A7%84%E7%BA%A6) +- [Linting 和格式化](#linting-%E5%92%8C%E6%A0%BC%E5%BC%8F%E5%8C%96) + +## Go 代码规约 + +- [Go 代码评审评论](https://go.dev/wiki/CodeReviewComments) +- [高效的 Go](https://golang.org/doc/effective_go.html) +- 了解并且避免 [Go 地雷](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) +- 为你的代码编写注释. 
+ + - [Go's 注释规约](https://go.dev/blog/godoc) + - 如果代码评阅者询问你代码为什么要这么实现,这可能说明你应当为你的代码编写注释。 +- 命令行标志应该用双连接号 `--`,而不是下划线 +- 接口 + + - 根据 RFC3986,URL 是大小写敏感的。Karpor 使用“短横线命名(`kebab-case`)”作为 URL。 + - 例如:`POST /rest-api/v1/resource-group-rule` +- 命名 + + - 为接口选择名称时请考虑包名称,避免冗余。 + + - 例如: `storage.Interface` 优于 `storage.StorageInterface`。 + - 不要在包名称中使用大写字符、下划线和破折号。 + - 选择包名称时,请考虑父目录名称。 + + - 所有 `pkg/manager/cluster/foo.go` 应该命名为 `package cluster` + 而不是 `package clustermanager`。 + - 除非有充分理由,`package foo` 行应该与 .go 文件所在目录的名称相同。 + - 为了避免歧义,导入包时可以指定别名。 + - 锁应该被称为 `lock`,并且永远不应该被嵌入(总是以 `lock sync.Mutex` 的形式明确声明)。当存在多个锁时,应该遵循 Go 的命名约定给每个锁一个 buts 的名称 - `stateLock`,`mapLock` 等。 + +## Bash 或脚本规约 + +- [Shell 样式指南](https://google.github.io/styleguide/shell.xml) +- 确保构建、发布、测试和集群管理脚本可以在 macOS 上运行 + +## 目录和文件规约 + +- 避免软件包无序扩展。为新的包找到合适的子目录。 + + - 没有更合适放置位置的新包应该放在 `pkg/util` 下的子目录。 +- 避免使用通用包。使用名为 `util` 的包让人疑惑。相反地,应当根据你期望的功能推导出包名 + 例如,处理等待操作的使用功能位于 `wait` 包中,包括类似 Poll 这样的功能,所以完整名称是 `wait.Poll` +- 所有的文件名都应该是小写 +- Go 源码文件名和目录名中使用 `_`,而不是 `-` + + - 包目录名通常应当尽量避免使用分隔符(当包目录名含多个单词时,它们通常应该被放在嵌套的子目录) +- 文档的文件名和目录名中应该使用 `-`,而不是 `_` +- 用于说明系统特性的示例应该位于 `/docs/user-guide` 或 `/docs/admin`, 取决于它是主要面向部署应用的用户还是集群管理员。实际的应用示例应位于 `/example` 中 + + - 示例还应该展示 [配置和使用系统的最佳实践](https://kubernetes.io/docs/concepts/configuration/overview/) +- 第三方代码 + + - 普通的第三方依赖 Go 代码 由 [go modules](https://github.com/golang/go/wiki/Modules) 管理 + - 其他的第三方代码应该放在 `/third_party` 目录下 + + - fork 的第三方 Go 代码放在 `/third_party/forked` 目录下 + - fork 的_golang stdlib_ 代码放在 `/third_party/forked/golang` 目录下 + - 第三方代码必须包含许可证 + - 这也包括修改过的第三方代码和引用 + +## Linting 和格式化 + +为了确保 Go 代码库之间的一致性,我们要求所有代码通过多个 linter 检查。 + +要运行所有的 linter,请使用 `lint` 作为 Makefile 目标: + +```shell +make lint +``` + +该命令将清理代码并进行一些 lint 检查。检查结束后请记得检查所有变更。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/3-test-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/3-test-conventions.md new file mode 100644 index 00000000..892c7f9e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/3-test-conventions.md @@ -0,0 +1,266 @@ +--- +title: 测试规约 +--- +## 测试原则 + +在 Karpor 中,我们主要关注以下三种测试: + +- 单元测试:主要关注 **最小可测试单元**(例如函数,方法,实用类等) +- 集成测试:针对 **多个单元(或模块)**间相互作用和集成的测试 +- 端到端测试(e2e tests): 针对 **整个系统行为** 的测试,通常需要模拟真实用户场景。 + +每种测试都有优势,劣势和适用场景。为了实现更好的开发体验,我们在编写测试时应遵循以下原则。 + +**测试原则**: + +- 单个测试用例应该仅覆盖单个场景 +- 遵循 **7-2-1 原则**,即 70% 的单元测试,20% 的集成测试和 10% 的端到端测试 +- **非必要情况下,避免在单元测试中使用框架**(比如 `golang/mock`)。如果你觉得需要在单元测试中使用 mock 框架,那么你可能应该实现的是集成测试甚至端到端测试。 + +## 技术选择 + +在当前时间点,在 Go 语言生态中最流行的测试框架主要有 [Ginkgo](https://onsi.github.io/ginkgo/)/[Gomega](https://onsi.github.io/gomega/) 和 [Testify](https://github.com/stretchr/testify)。因此,本节主要讨论这两个测试框架的的特点、优缺点以及最终的选择。 + +### Ginkgo/Gomega + +**优点**: + +1. **BDD 支持**:Ginkgo 因为支持行为驱动开发(Behavior-Driven Development,BDD)风格而收到许多开发人员的青睐。它提供了丰富的 DSL 语法,通过 `Describe`、`Context`、`It` 等关键字使测试用例更具描述性和可读性。 +2. **并行执行**:Ginkgo 能够以多进程并行执行测试,提高了测试执行的效率。 +3. **丰富的匹配器**:与 Gomega 匹配器库结合使用时,它提供了丰富的断言能力,使测试更加直观和方便。 +4. **异步支持**:Ginkgo 提供了对处理复杂异步场景的原生支持,降低了死锁和超时的风险。 +5. **测试用例组织**:支持将测试用例组织到测试套件中,便于管理和扩展。 + +**缺点**: + +1. **学习曲线过于陡峭**:对不熟悉 BDD 测试框架的开发者来说,Ginkgo 的 DSL 语法可能需要一些时间熟悉。 +2. **并行测试的复杂性**:尽管 Ginkgo 支持并行测试,但是管理用于并行测试的资源和环境可能会引入额外的复杂性。 + +### Testify + +**优点**: + +1. **简化的接口 API**: Testify 提供了简单明了的 API,容易上手,特别是对于熟悉了 `testing` 包的开发者。 +2. **Mock 支持**: 提供了强大的 Mock 功能,便于模拟依赖和接口进行单元测试。 +3. **表格驱动测试**: 支持表格驱动测试,允许测试同一个函数使用各种不同输入和预期输出,增强了测试用例的可重用性。 +4. 
**与 `testing` 包的兼容性**: Testify 与 Go 标准库的 testing 包高度兼容,可以无缝集成到现有的测试工作流中。 +5. **文档**: Testify 的 [官方文档](https://pkg.go.dev/github.com/stretchr/testify) 也提供了丰富的介绍,如何使用其断言和 Mock 功能。 + +**缺点**: + +1. **缺少 BDD 支持**: Testify 不支持 BDD 风格,对于寻求提高测试用例可读性的开发人员可能直观性较差。 +2. **功能相对简单**: 与 Ginkgo 相比,Testify 的功能相对简单,可能不满足一些复杂测试场景的需求。 + +### 总结 + +简而言之,Ginkgo/Gomega 提供了更好的可读性和可维护性,产生清晰明了的测试代码,但需要熟悉 BDD 风格,学习曲线比较陡峭。Testify 更简单、更实用,学习曲线较为平缓,但随着时间的推移,测试代码风格可能变得更加多样化,降低可维护性。 + +考虑到 Karpor 的实际情况和两种框架的优缺点,我们决定结合使用这两个框架: + +- 使用 Testify 进行单元测试,坚持使用 [表格驱动测试](https://go.dev/wiki/TableDrivenTests) 来约束代码风格,防止退化; +- 利用 Ginkgo 的 BDD 特性编写更高级别的集成和端到端测试; + +这种组合充分发挥了两种框架的优势,提高了测试的整体效率、可读性和质量。 + +## 编写规范 + +### 测试风格 + +[表格驱动测试](https://go.dev/wiki/TableDrivenTests) 是编写测试用例的最佳实践,类似于编程中的设计模式,它也是官方 Go 语言推荐的风格。表格驱动测试使用表格提供各种输入和预期输出,允许同一个测试函数验证不同的场景。这种方法的优点是它增加了测试用例的可重用性,减少了重复代码,并使测试更加清晰易于维护。 + +虽然 Go 的 `testing` 包中没有直接支持表格驱动测试的语法,但可以通过编写辅助函数和使用匿名函数来模拟实现。 + +这是一个在 Go 标准库的 `fmt` 包中实现的表格驱动测试的示例: + +```go +var flagtests = []struct { + in string + out string +}{ + {"%a", "[%a]"}, + {"%-a", "[%-a]"}, + {"%+a", "[%+a]"}, + {"%#a", "[%#a]"}, + {"% a", "[% a]"}, + {"%0a", "[%0a]"}, + {"%1.2a", "[%1.2a]"}, + {"%-1.2a", "[%-1.2a]"}, + {"%+1.2a", "[%+1.2a]"}, + {"%-+1.2a", "[%+-1.2a]"}, + {"%-+1.2abc", "[%+-1.2a]bc"}, + {"%-1.2abc", "[%-1.2a]bc"}, +} +func TestFlagParser(t *testing.T) { + var flagprinter flagPrinter + for _, tt := range flagtests { + t.Run(tt.in, func(t *testing.T) { + s := Sprintf(tt.in, &flagprinter) + if s != tt.out { + t.Errorf("got %q, want %q", s, tt.out) + } + }) + } +} +``` + +值得注意的是,目前大部分主流 IDE 都已经集成了 [gotests](https://github.com/cweill/gotests),可以自动生成表格驱动风格的 Go 单元测试,相信这可以提升大家编写单元测试的效率: + +- [GoLand](https://blog.jetbrains.com/go/2020/03/13/test-driven-development-with-goland/) +- [Visual Studio Code](https://juandes.com/go-test-vsc/) + +### 文件命名 + +- **规范内容**:测试函数必须以 `Test` 开头,后面跟着被测试函数的名称,使用驼峰式命名法。 +- **正面示例**:`xxx_test.go` +- **反面示例**:`testFile.go`、`test_xxx.go` + +### 测试函数命名 + +- **规范内容**:测试函数名必须以 `Test` 开头,后面跟着被测试函数的名称,使用驼峰式命名法。 +- **正面示例**: + ```go + func TestAdd(t *testing.T) { + // 测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestAddWrong(t *testing.T) { + // 测试逻辑 ... + } + ``` + +### 测试函数签名 + +- **规范内容**:测试函数签名必须是 `func TestXxx(t *testing.T)` 形式,其中 `t` 是类型为 `*testing.T` 的对象,并且不应该有其他的参数和返回值。 +- **正面示例**: + ```go + func TestSubtraction(t *testing.T) { + // 测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestSubtraction(value int) { + // 测试逻辑 ... + } + ``` + +### 测试组织 + +- **规范内容**:测试用例应当相互独立,避免测试之间相互影响;使用子测试 `t.Run` 来组织复杂的测试场景。 +- **正面示例**: + ```go + func TestMathOperations(t *testing.T) { + t.Run("Addition", func(t *testing.T) { + // addition 的测试逻辑 ... + }) + t.Run("Subtraction", func(t *testing.T) { + // subtraction 的测试逻辑 ... + }) + } + ``` +- **反面示例**: + ```go + func TestMathOperations(t *testing.T) { + // 混合 addition 和 subtraction 的测试逻辑 ... 
+ } + ``` + +### Test Coverage + +- **规范内容**:需要注意测试覆盖率,使用 `go test -cover` 命令检查代码的测试覆盖率。 +- **正面示例**: + + ```shell + $ go test -cover + ``` +- **反面示例**: + + ```shell + $ go test # 不检查测试覆盖率 + ``` +- **注意**: Karpor 将此命令包装为 `make cover`,它将在命令行中输出每个包的覆盖率和总覆盖率。如果你想在浏览器中查看覆盖率报告,请执行 `make cover-html`。 + +### 性能测试 + +- **规范内容**:性能测试应当以 `Benchmark` 开头,并且接受 `*testing.B` 类型的参数,专注于性能测试。 +- **正面示例**: + ```go + func BenchmarkAdd(b *testing.B) { + for i := 0; i < b.N; i++ { + add(1, 1) + } + } + ``` +- **反面示例**: + ```go + func BenchmarkAddWrong(b *testing.B) { + for i := 0; i < 1000; i++ { + add(1, 1) + } + } + ``` + +### 并发测试 + +- **规范内容**:对于并发代码,应该编写适当的测试用例,以确保并发逻辑的正确性。 +- **正面示例**: + ```go + func TestConcurrentAccess(t *testing.T) { + // 设置并发环境 ... + // 并发访问测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestConcurrentAccess(t *testing.T) { + // 仅测试单线程逻辑... + } + ``` + +### 测试帮助函数 + +- **规范内容**:可以在测试文件中定义辅助函数来帮助设置测试环境或清理资源。 +- **正面示例**: + ```go + func setupTest(t *testing.T) { + // 设置测试环境 ... + } + + func tearDownTest(t *testing.T) { + // 清理资源 ... + } + + func TestMyFunction(t *testing.T) { + t.Run("TestSetup", func(t *testing.T) { + setupTest(t) + // 测试逻辑 ... + }) + } + ``` +- **反面示例**: + ```go + // 直接在测试中设置环境和清理资源 + func TestMyFunction(t *testing.T) { + // 设置测试环境 ... + // 测试逻辑 ... + // 清理资源 ... + } + ``` + +### 避免使用全局变量 + +- **规范内容**: 尽量避免在测试中使用全局变量以确保测试独立。 +- **正面示例**: 在测试函数内部声明并使用必要的变量。 +- **反面示例**: 在测试文件的顶部声明全局变量。 + +### 清晰的错误信息 + +- **规范内容**: 当测试失败时,输出清晰易懂的错误信息,帮助开发人员定位问题。 +- **正面示例**: + - `t.Errorf("Expected value %d, but got %d", expected, real)` +- **反面示例**: + - `t.Errorf("Error occurred")` + - `fmt.Println("Error occurred")` + - `panic("Error occurred")` diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/4-commit-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/4-commit-conventions.md new file mode 100644 index 00000000..979b7631 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/4-commit-conventions.md @@ -0,0 +1,71 @@ +--- +title: Commit 规约 +--- +## Commit 信息结构 + +Karpor 遵循 [约定式提交](https://www.conventionalcommits.org/zh-hans/v1.0.0/)。 + +Commit 信息应当组织为以下结构: + +``` +<类型>[可选 范围]: <描述> + +[可选 正文] +``` + +## 示例 + +带范围的 Commit 信息: + +``` +feat(lang): add polish language +``` + +不带正文的 Commit 信息: + +``` +docs: correct spelling of CHANGELOG +``` + +带多个正文段落的 Commit 信息: + +``` +fix: correct minor typos in code + +see the issue for details + +on typos fixed. 
+ +reviewed-by: Z +refs #133 +``` + +## `<类型>`(必须) + +必须填写的类型有助于更容易确定这次提交的范围,基于 [Angular 指南](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines)。 + +我们在 `<类型>` 使用小写,以避免花费时间在大小写敏感问题上。`<类型>` 可以是以下之一: + +- **feat**:新特性 +- **fix**:漏洞修复 +- **docs**:仅文档改动 +- **build**:关于构建系统和外部依赖的改动 +- **style**:不影响代码含义的改动(如空行、格式、缺少分号等) +- **refactor**:不属于漏洞修复或者增加特性的代码改动 +- **perf**:提升性能的代码改动 +- **test**: 增加缺少的测试用例或者修正现有的测试用例 +- **chore**: 构建过程或辅助工具和库(如文档生成)的修改 + +## `<范围>`(可选) + +范围是可选的,可以包含在括号中为类型提供更多的上下文信息。可以指定这次 commit 的任何内容。Github issue 也是有效的范围,例如 `fix(ui)`、`feat(api)`、`fix(#233)` 等。 + +当改动影响多个范围时,可以使用 `*`。 + +## `<描述>`(必须) + +描述必须紧跟在类型/范围前缀后面的冒号和空格。它是代码更改的简明摘要,例如 `fix: array parsing issue when multiple spaces were contained in string`,而不是 `fix: bug`。 + +## `<正文>`(可选) + +在简短的描述后可以添加较长的正文,提供有关代码更改的更多上下文信息。正文必须位于描述之后一行。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/_category_.json new file mode 100644 index 00000000..3287fa06 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/2-conventions/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Conventions" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/_category_.json new file mode 100644 index 00000000..8de262b6 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/4-developer-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/1-karpor.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/1-karpor.md new file mode 100644 index 00000000..891809d7 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/1-karpor.md @@ -0,0 +1,230 @@ +--- +title: karpor +--- +### Synopsis + +Launch an API server + +``` +karpor [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --api-audiences strings Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. + --audit-log-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-log-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 1) + --audit-log-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. 
Only used in batch mode. + --audit-log-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. + --audit-log-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. + --audit-log-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. + --audit-log-compress If set, the rotated log files will be compressed using gzip. + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "blocking") + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-log-truncate-enabled Whether event and batch truncating is enabled. + --audit-log-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-log-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-log-version string API group and version used for serializing audit events written to log. (default "audit.k8s.io/v1") + --audit-policy-file string Path to the file that defines the audit policy configuration. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-webhook-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. (default true) + --audit-webhook-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. + --audit-webhook-initial-backoff duration The amount of time to wait before retrying the first failed request. (default 10s) + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. 
Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "batch") + --audit-webhook-truncate-enabled Whether event and batch truncating is enabled. + --audit-webhook-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-webhook-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-webhook-version string API group and version used for serializing audit events written to webhook. (default "audit.k8s.io/v1") + --authorization-mode strings Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default [RBAC]) + --authorization-policy-file string File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --authorization-webhook-version string The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook. (default "v1beta1") + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. (default [.*]) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. 
(default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook,ValidatingAdmissionPolicy]) + --egress-selector-config-file string File with apiserver egress selector configuration. + --elastic-search-addresses strings The elastic search address + --elastic-search-password string The elastic search password + --elastic-search-username string The elastic search username + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-priority-and-fairness If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness (default true) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --encryption-provider-config-automatic-reload Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints. + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-count-metric-poll-period duration Frequency of polling etcd for number of resources per type. 0 disables the metric collection. (default 1m0s) + --etcd-db-metric-poll-interval duration The interval of requests to poll etcd and update metric. 0 disables the metric collection (default 30s) + --etcd-healthcheck-timeout duration The timeout to use when checking etcd health. (default 2s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry/karpor") + --etcd-readycheck-timeout duration The timeout to use when checking etcd readiness (default 2s) + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary. + --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. 
Options are: + APIListChunking=true|false (BETA - default=true) + APIPriorityAndFairness=true|false (BETA - default=true) + APIResponseCompression=true|false (BETA - default=true) + APISelfSubjectReview=true|false (ALPHA - default=false) + APIServerIdentity=true|false (BETA - default=true) + APIServerTracing=true|false (ALPHA - default=false) + AggregatedDiscoveryEndpoint=true|false (ALPHA - default=false) + AllAlpha=true|false (ALPHA - default=false) + AllBeta=true|false (BETA - default=false) + AnyVolumeDataSource=true|false (BETA - default=true) + AppArmor=true|false (BETA - default=true) + CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + CPUManagerPolicyBetaOptions=true|false (BETA - default=true) + CPUManagerPolicyOptions=true|false (BETA - default=true) + CSIMigrationPortworx=true|false (BETA - default=false) + CSIMigrationRBD=true|false (ALPHA - default=false) + CSINodeExpandSecret=true|false (ALPHA - default=false) + CSIVolumeHealth=true|false (ALPHA - default=false) + ComponentSLIs=true|false (ALPHA - default=false) + ContainerCheckpoint=true|false (ALPHA - default=false) + CronJobTimeZone=true|false (BETA - default=true) + CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false) + CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false) + CustomResourceValidationExpressions=true|false (BETA - default=true) + DisableCloudProviders=true|false (ALPHA - default=false) + DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false) + DownwardAPIHugePages=true|false (BETA - default=true) + DynamicResourceAllocation=true|false (ALPHA - default=false) + EventedPLEG=true|false (ALPHA - default=false) + ExpandedDNSConfig=true|false (BETA - default=true) + ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) + GRPCContainerProbe=true|false (BETA - default=true) + GracefulNodeShutdown=true|false (BETA - default=true) + GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true) + HPAContainerMetrics=true|false (ALPHA - default=false) + HPAScaleToZero=true|false (ALPHA - default=false) + HonorPVReclaimPolicy=true|false (ALPHA - default=false) + IPTablesOwnershipCleanup=true|false (ALPHA - default=false) + InTreePluginAWSUnregister=true|false (ALPHA - default=false) + InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false) + InTreePluginAzureFileUnregister=true|false (ALPHA - default=false) + InTreePluginGCEUnregister=true|false (ALPHA - default=false) + InTreePluginOpenStackUnregister=true|false (ALPHA - default=false) + InTreePluginPortworxUnregister=true|false (ALPHA - default=false) + InTreePluginRBDUnregister=true|false (ALPHA - default=false) + InTreePluginvSphereUnregister=true|false (ALPHA - default=false) + JobMutableNodeSchedulingDirectives=true|false (BETA - default=true) + JobPodFailurePolicy=true|false (BETA - default=true) + JobReadyPods=true|false (BETA - default=true) + KMSv2=true|false (ALPHA - default=false) + KubeletInUserNamespace=true|false (ALPHA - default=false) + KubeletPodResources=true|false (BETA - default=true) + KubeletPodResourcesGetAllocatable=true|false (BETA - default=true) + KubeletTracing=true|false (ALPHA - default=false) + LegacyServiceAccountTokenTracking=true|false (ALPHA - default=false) + LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) + LogarithmicScaleDown=true|false (BETA - default=true) + MatchLabelKeysInPodTopologySpread=true|false (ALPHA - default=false) + MaxUnavailableStatefulSet=true|false (ALPHA - default=false) + 
MemoryManager=true|false (BETA - default=true) + MemoryQoS=true|false (ALPHA - default=false) + MinDomainsInPodTopologySpread=true|false (BETA - default=false) + MinimizeIPTablesRestore=true|false (ALPHA - default=false) + MultiCIDRRangeAllocator=true|false (ALPHA - default=false) + NetworkPolicyStatus=true|false (ALPHA - default=false) + NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true) + NodeOutOfServiceVolumeDetach=true|false (BETA - default=true) + NodeSwap=true|false (ALPHA - default=false) + OpenAPIEnums=true|false (BETA - default=true) + OpenAPIV3=true|false (BETA - default=true) + PDBUnhealthyPodEvictionPolicy=true|false (ALPHA - default=false) + PodAndContainerStatsFromCRI=true|false (ALPHA - default=false) + PodDeletionCost=true|false (BETA - default=true) + PodDisruptionConditions=true|false (BETA - default=true) + PodHasNetworkCondition=true|false (ALPHA - default=false) + PodSchedulingReadiness=true|false (ALPHA - default=false) + ProbeTerminationGracePeriod=true|false (BETA - default=true) + ProcMountType=true|false (ALPHA - default=false) + ProxyTerminatingEndpoints=true|false (BETA - default=true) + QOSReserved=true|false (ALPHA - default=false) + ReadWriteOncePod=true|false (ALPHA - default=false) + RecoverVolumeExpansionFailure=true|false (ALPHA - default=false) + RemainingItemCount=true|false (BETA - default=true) + RetroactiveDefaultStorageClass=true|false (BETA - default=true) + RotateKubeletServerCertificate=true|false (BETA - default=true) + SELinuxMountReadWriteOncePod=true|false (ALPHA - default=false) + SeccompDefault=true|false (BETA - default=true) + ServerSideFieldValidation=true|false (BETA - default=true) + SizeMemoryBackedVolumes=true|false (BETA - default=true) + StatefulSetAutoDeletePVC=true|false (ALPHA - default=false) + StatefulSetStartOrdinal=true|false (ALPHA - default=false) + StorageVersionAPI=true|false (ALPHA - default=false) + StorageVersionHash=true|false (BETA - default=true) + TopologyAwareHints=true|false (BETA - default=true) + TopologyManager=true|false (BETA - default=true) + TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + TopologyManagerPolicyBetaOptions=true|false (BETA - default=false) + TopologyManagerPolicyOptions=true|false (ALPHA - default=false) + UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false) + ValidatingAdmissionPolicy=true|false (ALPHA - default=false) + VolumeCapacityPriority=true|false (ALPHA - default=false) + WinDSR=true|false (ALPHA - default=false) + WinOverlay=true|false (BETA - default=true) + WindowsHostNetwork=true|false (ALPHA - default=true) (default APIPriorityAndFairness=true) + --goaway-chance float To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. + -h, --help help for karpor + --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default. (default 1000) + --lease-reuse-duration-seconds int The time in seconds that each lease is reused. 
A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer. (default 60) + --livez-grace-period duration This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true. + --max-mutating-requests-inflight int This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely. (default 200) + --max-requests-inflight int This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false] + --permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false] + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --read-only-mode turn on the read only mode + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests. + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. + --search-storage-type string The search storage type + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 443) + --shutdown-delay-duration duration Time to delay the termination. 
During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server. + --shutdown-send-retry-after If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle. + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default). + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. Supported media types: [application/json, application/yaml, application/vnd.kubernetes.protobuf] (default "application/json") + --strict-transport-security-directives strings List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload' + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. (default "apiserver.local.config/certificates/apiserver.crt") + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. + Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. + Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. + --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13 + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. (default "apiserver.local.config/certificates/apiserver.key") + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. 
If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --tracing-config-file string File with apiserver tracing configuration. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. This option is only meaningful for resources built into the apiserver, not ones defined by CRDs or aggregated from external servers, and is only consulted if the watch-cache is enabled. The only meaningful size setting to supply here is zero, which means to disable watch caching for the associated resource; all non-zero values are equivalent and mean to not disable watch caching for that resource +``` + +### SEE ALSO + +* [karpor syncer](2-karpor-syncer.md) - start a resource syncer to sync resource from clusters + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/2-karpor-syncer.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/2-karpor-syncer.md new file mode 100644 index 00000000..d25245ae --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/2-karpor-syncer.md @@ -0,0 +1,25 @@ +--- +title: karpor syncer +--- +## karpor syncer + +start a resource syncer to sync resource from clusters + +``` +karpor syncer [flags] +``` + +### Options + +``` + --elastic-search-addresses strings The elastic search address. + --health-probe-bind-address string The address the probe endpoint binds to. (default ":8081") + -h, --help help for syncer + --metrics-bind-address string The address the metric endpoint binds to. 
(default ":8080") +``` + +### SEE ALSO + +* [karpor](1-karpor.md) - Launch an API server + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/_category_.json new file mode 100644 index 00000000..41757f5f --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/1-cli-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "CLI Commands" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/2-openapi.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/2-openapi.md new file mode 100644 index 00000000..81c0321d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/2-openapi.md @@ -0,0 +1,1862 @@ +--- +title: OpenAPI +--- +## Informations + +### Version + +1.0 + +### Contact + +## Content negotiation + +### URI Schemes + +* http + +### Consumes + +* application/json +* multipart/form-data +* text/plain + +### Produces + +* application/json +* text/plain + +## All endpoints + +### cluster + +| Method | URI | Name | Summary | +| ------ | ------------------------------------ | ------------------------------------------------------------------------------------- | -------------------------------------------- | +| DELETE | /rest-api/v1/cluster/{clusterName} | [delete rest API v1 cluster cluster name](#delete-rest-api-v1-cluster-cluster-name) | Delete removes a cluster resource by name. | +| GET | /rest-api/v1/cluster/{clusterName} | [get rest API v1 cluster cluster name](#get-rest-api-v1-cluster-cluster-name) | Get returns a cluster resource by name. | +| GET | /rest-api/v1/clusters | [get rest API v1 clusters](#get-rest-api-v1-clusters) | List lists all cluster resources. | +| POST | /rest-api/v1/cluster/{clusterName} | [post rest API v1 cluster cluster name](#post-rest-api-v1-cluster-cluster-name) | Create creates a cluster resource. | +| POST | /rest-api/v1/cluster/config/file | [post rest API v1 cluster config file](#post-rest-api-v1-cluster-config-file) | Upload kubeConfig file for cluster | +| POST | /rest-api/v1/cluster/config/validate | [post rest API v1 cluster config validate](#post-rest-api-v1-cluster-config-validate) | Validate KubeConfig | +| PUT | /rest-api/v1/cluster/{clusterName} | [put rest API v1 cluster cluster name](#put-rest-api-v1-cluster-cluster-name) | Update updates the cluster metadata by name. | + +### debug + +| Method | URI | Name | Summary | +| ------ | ---------- | ------------------------------- | ---------------------------- | +| GET | /endpoints | [get endpoints](#get-endpoints) | List all available endpoints | + +### insight + +| Method | URI | Name | Summary | +| ------ | ----------------------------- | --------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/insight/audit | [get rest API v1 insight audit](#get-rest-api-v1-insight-audit) | Audit based on resource group. | +| GET | /rest-api/v1/insight/detail | [get rest API v1 insight detail](#get-rest-api-v1-insight-detail) | GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. 
| +| GET | /rest-api/v1/insight/events | [get rest API v1 insight events](#get-rest-api-v1-insight-events) | GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/score | [get rest API v1 insight score](#get-rest-api-v1-insight-score) | ScoreHandler calculates a score for the audited manifest. | +| GET | /rest-api/v1/insight/stats | [get rest API v1 insight stats](#get-rest-api-v1-insight-stats) | Get returns a global statistics info. | +| GET | /rest-api/v1/insight/summary | [get rest API v1 insight summary](#get-rest-api-v1-insight-summary) | Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/topology | [get rest API v1 insight topology](#get-rest-api-v1-insight-topology) | GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | + +### resourcegroup + +| Method | URI | Name | Summary | +| ------ | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | +| GET | /rest-api/v1/resource-groups/{resourceGroupRuleName} | [get rest API v1 resource groups resource group rule name](#get-rest-api-v1-resource-groups-resource-group-rule-name) | List lists all ResourceGroups by rule name. | + +### resourcegrouprule + +| Method | URI | Name | Summary | +| ------ | -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | +| DELETE | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [delete rest API v1 resource group rule resource group rule name](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name) | Delete removes a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [get rest API v1 resource group rule resource group rule name](#get-rest-api-v1-resource-group-rule-resource-group-rule-name) | Get returns a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rules | [get rest API v1 resource group rules](#get-rest-api-v1-resource-group-rules) | List lists all ResourceGroupRules. | +| POST | /rest-api/v1/resource-group-rule | [post rest API v1 resource group rule](#post-rest-api-v1-resource-group-rule) | Create creates a ResourceGroupRule. | +| PUT | /rest-api/v1/resource-group-rule | [put rest API v1 resource group rule](#put-rest-api-v1-resource-group-rule) | Update updates the ResourceGroupRule metadata by name. | + +### search + +| Method | URI | Name | Summary | +| ------ | ------------------- | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/search | [get rest API v1 search](#get-rest-api-v1-search) | SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. | + +## Paths + +### Delete removes a cluster resource by name. (*DeleteRestAPIV1ClusterClusterName*) + +``` +DELETE /rest-api/v1/cluster/{clusterName} +``` + +This endpoint deletes the cluster resource by name. 
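
As a purely illustrative sketch (the server address, the cluster name `my-cluster`, and any authentication are assumptions, not part of this reference), a call to this endpoint with `curl` might look like the following; the other REST endpoints in this reference follow the same pattern:

```shell
# Hypothetical invocation: replace <karpor-server> with your Karpor server address
# and add whatever authentication headers your deployment requires.
curl -k -X DELETE "https://<karpor-server>/rest-api/v1/cluster/my-cluster"
```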
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------- | +| [200](#delete-rest-api-v1-cluster-cluster-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#delete-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#delete-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#delete-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#delete-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#delete-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#delete-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Delete removes a ResourceGroupRule by name. (*DeleteRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +DELETE /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint deletes the ResourceGroupRule by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------------------------------- | +| [200](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List all available endpoints (*GetEndpoints*) + +``` +GET /endpoints +``` + +List all registered endpoints in the router + +#### Consumes + +* text/plain + +#### Produces + +* text/plain + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------- | ------ | ----------------------------- | :---------: | ----------------------------------- | +| [200](#get-endpoints-200) | OK | Endpoints listed successfully | | [schema](#get-endpoints-200-schema) | + +#### Responses + +##### 200 - Endpoints listed successfully + +Status: OK + +###### Schema + +### Get returns a cluster resource by name. (*GetRestAPIV1ClusterClusterName*) + +``` +GET /rest-api/v1/cluster/{clusterName} +``` + +This endpoint returns a cluster resource by name. 
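
Again as an illustrative sketch only (the server address and cluster name are placeholders), the optional `format` query parameter described in the Parameters table below can be used to request YAML output:

```shell
# Hypothetical invocation: replace <karpor-server> and my-cluster with real values.
curl -k "https://<karpor-server>/rest-api/v1/cluster/my-cluster?format=yaml"
```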
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------- | ------ | -------- | --------- | :------: | ------- | -------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#get-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#get-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#get-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#get-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#get-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#get-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all cluster resources. (*GetRestAPIV1Clusters*) + +``` +GET /rest-api/v1/clusters +``` + +This endpoint lists all cluster resources. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the cluster. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------ | --------------------- | ----------------------- | :---------: | ---------------------------------------------- | +| [200](#get-rest-api-v1-clusters-200) | OK | List of cluster objects | | [schema](#get-rest-api-v1-clusters-200-schema) | +| [400](#get-rest-api-v1-clusters-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-clusters-400-schema) | +| [401](#get-rest-api-v1-clusters-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-clusters-401-schema) | +| [404](#get-rest-api-v1-clusters-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-clusters-404-schema) | +| [405](#get-rest-api-v1-clusters-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-clusters-405-schema) | +| [429](#get-rest-api-v1-clusters-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-clusters-429-schema) | +| [500](#get-rest-api-v1-clusters-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-clusters-500-schema) | + +#### Responses + +##### 200 - List of cluster objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Audit based on resource group. (*GetRestAPIV1InsightAudit*) + +``` +GET /rest-api/v1/insight/audit +``` + +This endpoint audits based on the specified resource group. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced scanning, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-audit-200) | OK | Audit results | | [schema](#get-rest-api-v1-insight-audit-200-schema) | +| [400](#get-rest-api-v1-insight-audit-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-audit-400-schema) | +| [401](#get-rest-api-v1-insight-audit-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-audit-401-schema) | +| [404](#get-rest-api-v1-insight-audit-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-audit-404-schema) | +| [429](#get-rest-api-v1-insight-audit-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-audit-429-schema) | +| [500](#get-rest-api-v1-insight-audit-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-audit-500-schema) | + +#### Responses + +##### 200 - Audit results + +Status: OK + +###### Schema + +[ScannerAuditData](#scanner-audit-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightDetail*) + +``` +GET /rest-api/v1/insight/detail +``` + +This endpoint returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml. 
Default to json | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-detail-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-insight-detail-200-schema) | +| [400](#get-rest-api-v1-insight-detail-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-detail-400-schema) | +| [401](#get-rest-api-v1-insight-detail-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-detail-401-schema) | +| [404](#get-rest-api-v1-insight-detail-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-detail-404-schema) | +| [405](#get-rest-api-v1-insight-detail-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-detail-405-schema) | +| [429](#get-rest-api-v1-insight-detail-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-detail-429-schema) | +| [500](#get-rest-api-v1-insight-detail-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-detail-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightEvents*) + +``` +GET /rest-api/v1/insight/events +``` + +This endpoint returns events for a Kubernetes resource YAML by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-events-200) | OK | List of events | | [schema](#get-rest-api-v1-insight-events-200-schema) | +| [400](#get-rest-api-v1-insight-events-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-events-400-schema) | +| [401](#get-rest-api-v1-insight-events-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-events-401-schema) | +| [404](#get-rest-api-v1-insight-events-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-events-404-schema) | +| [405](#get-rest-api-v1-insight-events-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-events-405-schema) | +| [429](#get-rest-api-v1-insight-events-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-events-429-schema) | +| [500](#get-rest-api-v1-insight-events-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-events-500-schema) | + +#### Responses + +##### 200 - List of events + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### ScoreHandler calculates a score for the audited manifest. (*GetRestAPIV1InsightScore*) + +``` +GET /rest-api/v1/insight/score +``` + +This endpoint calculates a score for the provided manifest based on the number and severity of issues detected during the audit. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced compute score, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ------------------------ | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-score-200) | OK | Score calculation result | | [schema](#get-rest-api-v1-insight-score-200-schema) | +| [400](#get-rest-api-v1-insight-score-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-score-400-schema) | +| [401](#get-rest-api-v1-insight-score-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-score-401-schema) | +| [404](#get-rest-api-v1-insight-score-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-score-404-schema) | +| [429](#get-rest-api-v1-insight-score-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-score-429-schema) | +| [500](#get-rest-api-v1-insight-score-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-score-500-schema) | + +#### Responses + +##### 200 - Score calculation result + +Status: OK + +###### Schema + +[InsightScoreData](#insight-score-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a global statistics info. (*GetRestAPIV1InsightStats*) + +``` +GET /rest-api/v1/insight/stats +``` + +This endpoint returns a global statistics info. 
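+
+Since this endpoint takes no parameters, a sketch of the call is just a plain GET; `$KARPOR_SERVER` is a placeholder for your server address and any required authentication is omitted:
+
+```shell
+# Retrieve the global statistics (cluster, resource and resource group rule counts).
+curl "$KARPOR_SERVER/rest-api/v1/insight/stats"
+```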
+ +#### Produces + +* application/json + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ---------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-stats-200) | OK | Global statistics info | | [schema](#get-rest-api-v1-insight-stats-200-schema) | +| [400](#get-rest-api-v1-insight-stats-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-stats-400-schema) | +| [401](#get-rest-api-v1-insight-stats-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-stats-401-schema) | +| [404](#get-rest-api-v1-insight-stats-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-stats-404-schema) | +| [405](#get-rest-api-v1-insight-stats-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-stats-405-schema) | +| [429](#get-rest-api-v1-insight-stats-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-stats-429-schema) | +| [500](#get-rest-api-v1-insight-stats-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-stats-500-schema) | + +#### Responses + +##### 200 - Global statistics info + +Status: OK + +###### Schema + +[InsightStatistics](#insight-statistics) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightSummary*) + +``` +GET /rest-api/v1/insight/summary +``` + +This endpoint returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------- | +| [200](#get-rest-api-v1-insight-summary-200) | OK | Resource Summary | | [schema](#get-rest-api-v1-insight-summary-200-schema) | +| [400](#get-rest-api-v1-insight-summary-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-summary-400-schema) | +| [401](#get-rest-api-v1-insight-summary-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-summary-401-schema) | +| [404](#get-rest-api-v1-insight-summary-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-summary-404-schema) | +| [405](#get-rest-api-v1-insight-summary-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-summary-405-schema) | +| [429](#get-rest-api-v1-insight-summary-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-summary-429-schema) | +| [500](#get-rest-api-v1-insight-summary-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-summary-500-schema) | + +#### Responses + +##### 200 - Resource Summary + +Status: OK + +###### Schema + +[InsightResourceSummary](#insight-resource-summary) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightTopology*) + +``` +GET /rest-api/v1/insight/topology +``` + +This endpoint returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Force re-generating the topology, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------- | --------------------- | -------------------------------------------- | :---------: | ------------------------------------------------------ | +| [200](#get-rest-api-v1-insight-topology-200) | OK | map from string to resource.ResourceTopology | | [schema](#get-rest-api-v1-insight-topology-200-schema) | +| [400](#get-rest-api-v1-insight-topology-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-topology-400-schema) | +| [401](#get-rest-api-v1-insight-topology-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-topology-401-schema) | +| [404](#get-rest-api-v1-insight-topology-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-topology-404-schema) | +| [405](#get-rest-api-v1-insight-topology-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-topology-405-schema) | +| [429](#get-rest-api-v1-insight-topology-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-topology-429-schema) | +| [500](#get-rest-api-v1-insight-topology-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-topology-500-schema) | + +#### Responses + +##### 200 - map from string to resource.ResourceTopology + +Status: OK + +###### Schema + +map of [InsightResourceTopology](#insight-resource-topology) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a ResourceGroupRule by name. (*GetRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint returns a ResourceGroupRule by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroupRules. (*GetRestAPIV1ResourceGroupRules*) + +``` +GET /rest-api/v1/resource-group-rules +``` + +This endpoint lists all ResourceGroupRules. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the resourceGroupRule. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rules-200) | OK | List of resourceGroupRule objects | | [schema](#get-rest-api-v1-resource-group-rules-200-schema) | +| [400](#get-rest-api-v1-resource-group-rules-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rules-400-schema) | +| [401](#get-rest-api-v1-resource-group-rules-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rules-401-schema) | +| [404](#get-rest-api-v1-resource-group-rules-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rules-404-schema) | +| [405](#get-rest-api-v1-resource-group-rules-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rules-405-schema) | +| [429](#get-rest-api-v1-resource-group-rules-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rules-429-schema) | +| [500](#get-rest-api-v1-resource-group-rules-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rules-500-schema) | + +#### Responses + +##### 200 - List of resourceGroupRule objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroups by rule name. (*GetRestAPIV1ResourceGroupsResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-groups/{resourceGroupRuleName} +``` + +This endpoint lists all ResourceGroups. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------------------------------- | --------------------- | ----------------------------- | :---------: | ------------------------------------------------------------------------------ | +| [200](#get-rest-api-v1-resource-groups-resource-group-rule-name-200) | OK | List of resourceGroup objects | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-groups-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-groups-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-groups-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-groups-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-groups-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-groups-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - List of resourceGroup objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. (*GetRestAPIV1Search*) + +``` +GET /rest-api/v1/search +``` + +This endpoint returns an array of Kubernetes runtime Object matched using the query from context. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| -------- | ------- | ------ | -------- | --------- | :------: | ------- | ------------------------------------------------------ | +| page | `query` | string | `string` | | | | The current page to fetch. Default to 1 | +| pageSize | `query` | string | `string` | | | | The size of the page. Default to 10 | +| pattern | `query` | string | `string` | | ✓ | | The search pattern. Can be either sql or dsl. Required | +| query | `query` | string | `string` | | ✓ | | The query to use for search. 
Required | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------- | --------------------- | ----------------------- | :---------: | -------------------------------------------- | +| [200](#get-rest-api-v1-search-200) | OK | Array of runtime.Object | | [schema](#get-rest-api-v1-search-200-schema) | +| [400](#get-rest-api-v1-search-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-search-400-schema) | +| [401](#get-rest-api-v1-search-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-search-401-schema) | +| [404](#get-rest-api-v1-search-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-search-404-schema) | +| [405](#get-rest-api-v1-search-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-search-405-schema) | +| [429](#get-rest-api-v1-search-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-search-429-schema) | +| [500](#get-rest-api-v1-search-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-search-500-schema) | + +#### Responses + +##### 200 - Array of runtime.Object + +Status: OK + +###### Schema + +[][interface{}](#interface) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a cluster resource. (*PostRestAPIV1ClusterClusterName*) + +``` +POST /rest-api/v1/cluster/{clusterName} +``` + +This endpoint creates a new cluster resource using the payload. 
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#post-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#post-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#post-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#post-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#post-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#post-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Upload kubeConfig file for cluster (*PostRestAPIV1ClusterConfigFile*) + +``` +POST /rest-api/v1/cluster/config/file +``` + +Uploads a KubeConfig file for cluster, with a maximum size of 2MB. 
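+
+A rough `curl` sketch of the upload is shown below; the server address and values are placeholders, while the multipart form field names (`name`, `displayName`, `description`, `file`) are the ones listed in the parameter table that follows:
+
+```shell
+# Upload a kubeconfig file (must stay under the 2MB limit) for the cluster "example-cluster".
+curl -X POST "$KARPOR_SERVER/rest-api/v1/cluster/config/file" \
+  -F "name=example-cluster" \
+  -F "displayName=Example Cluster" \
+  -F "description=Demo cluster" \
+  -F "file=@$HOME/.kube/config"
+```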
+ +#### Consumes + +* multipart/form-data + +#### Produces + +* text/plain + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ---------- | ------ | --------------- | --------- | :------: | ------- | ---------------------------------- | +| description | `formData` | string | `string` | | ✓ | | cluster description | +| displayName | `formData` | string | `string` | | ✓ | | cluster display name | +| file | `formData` | file | `io.ReadCloser` | | ✓ | | Upload file with field name 'file' | +| name | `formData` | string | `string` | | ✓ | | cluster name | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-file-200) | OK | Returns the content of the uploaded KubeConfig file. | | [schema](#post-rest-api-v1-cluster-config-file-200-schema) | +| [400](#post-rest-api-v1-cluster-config-file-400) | Bad Request | The uploaded file is too large or the request is invalid. | | [schema](#post-rest-api-v1-cluster-config-file-400-schema) | +| [500](#post-rest-api-v1-cluster-config-file-500) | Internal Server Error | Internal server error. | | [schema](#post-rest-api-v1-cluster-config-file-500-schema) | + +#### Responses + +##### 200 - Returns the content of the uploaded KubeConfig file. + +Status: OK + +###### Schema + +[ClusterUploadData](#cluster-upload-data) + +##### 400 - The uploaded file is too large or the request is invalid. + +Status: Bad Request + +###### Schema + +##### 500 - Internal server error. + +Status: Internal Server Error + +###### Schema + +### Validate KubeConfig (*PostRestAPIV1ClusterConfigValidate*) + +``` +POST /rest-api/v1/cluster/config/validate +``` + +Validates the provided KubeConfig using cluster manager methods. 
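+
+A minimal `curl` sketch follows; the server address is a placeholder and the `kubeConfig` field name comes from the `cluster.ValidatePayload` model documented later in this reference:
+
+```shell
+# Validate a kubeconfig before registering the cluster; the value is an illustrative placeholder.
+curl -X POST "$KARPOR_SERVER/rest-api/v1/cluster/config/validate" \
+  -H "Content-Type: application/json" \
+  -d '{"kubeConfig": "<kubeconfig file content as a string>"}'
+```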
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | --------------------------------------------------- | ------------------------------- | --------- | :------: | ------- | ------------------------------ | +| request | `body` | [ClusterValidatePayload](#cluster-validate-payload) | `models.ClusterValidatePayload` | | ✓ | | KubeConfig payload to validate | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------------------------- | --------------------- | ---------------------------------- | :---------: | -------------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-validate-200) | OK | Verification passed server version | | [schema](#post-rest-api-v1-cluster-config-validate-200-schema) | +| [400](#post-rest-api-v1-cluster-config-validate-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-config-validate-400-schema) | +| [401](#post-rest-api-v1-cluster-config-validate-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-config-validate-401-schema) | +| [404](#post-rest-api-v1-cluster-config-validate-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-config-validate-404-schema) | +| [429](#post-rest-api-v1-cluster-config-validate-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-config-validate-429-schema) | +| [500](#post-rest-api-v1-cluster-config-validate-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-config-validate-500-schema) | + +#### Responses + +##### 200 - Verification passed server version + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a ResourceGroupRule. (*PostRestAPIV1ResourceGroupRule*) + +``` +POST /rest-api/v1/resource-group-rule +``` + +This endpoint creates a new ResourceGroupRule using the payload. 
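+
+The sketch below shows one possible `curl` call; the server address and rule values are illustrative, while the payload field names (`name`, `description`, `fields`) come from the `resourcegrouprule.ResourceGroupRulePayload` model later in this reference:
+
+```shell
+# Create a ResourceGroupRule that groups resources by namespace (illustrative values).
+curl -X POST "$KARPOR_SERVER/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d '{"name": "example-rule", "description": "Group resources by namespace", "fields": ["namespace"]}'
+```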
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-resource-group-rule-200-schema) | +| [400](#post-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-resource-group-rule-400-schema) | +| [401](#post-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-resource-group-rule-401-schema) | +| [404](#post-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-resource-group-rule-404-schema) | +| [405](#post-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-resource-group-rule-405-schema) | +| [429](#post-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-resource-group-rule-429-schema) | +| [500](#post-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the cluster metadata by name. (*PutRestAPIV1ClusterClusterName*) + +``` +PUT /rest-api/v1/cluster/{clusterName} +``` + +This endpoint updates the display name and description of an existing cluster resource. 
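+
+For example, a sketch of updating the metadata with `curl` (placeholders as before; the body reuses the `cluster.ClusterPayload` fields documented later in this reference):
+
+```shell
+# Update only the display name and description of the existing cluster "example-cluster".
+curl -X PUT "$KARPOR_SERVER/rest-api/v1/cluster/example-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{"displayName": "Example Cluster (renamed)", "description": "Updated description"}'
+```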
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#put-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#put-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#put-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#put-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#put-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#put-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#put-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the ResourceGroupRule metadata by name. (*PutRestAPIV1ResourceGroupRule*) + +``` +PUT /rest-api/v1/resource-group-rule +``` + +This endpoint updates the display name and description of an existing ResourceGroupRule. 
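+
+A minimal `curl` sketch, assuming the rule to update is identified by the `name` field in the payload (values are placeholders; the field names come from `resourcegrouprule.ResourceGroupRulePayload`):
+
+```shell
+# Update the description of the existing ResourceGroupRule "example-rule".
+curl -X PUT "$KARPOR_SERVER/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d '{"name": "example-rule", "description": "Updated description"}'
+```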
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------------- | +| [200](#put-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-resource-group-rule-200-schema) | +| [400](#put-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-resource-group-rule-400-schema) | +| [401](#put-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-resource-group-rule-401-schema) | +| [404](#put-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-resource-group-rule-404-schema) | +| [405](#put-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-resource-group-rule-405-schema) | +| [429](#put-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-resource-group-rule-429-schema) | +| [500](#put-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +## Models + +### cluster.ClusterPayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------ | -------- | :------: | ------- | --------------------------------------------------------------- | ------- | +| description | string | `string` | | | ClusterDescription is the description of cluster to be created | | +| displayName | string | `string` | | | ClusterDisplayName is the display name of cluster to be created | | +| kubeconfig | string | `string` | | | ClusterKubeConfig is the kubeconfig of cluster to be created | | + +### cluster.UploadData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------------- | ------- | -------- | :------: | ------- | ----------- | ------- | +| content | string | `string` | | | | | +| fileName | string | `string` | | | | | +| fileSize | integer | `int64` | | | | | +| sanitizedClusterContent | 
string | `string` | | | | | + +### cluster.ValidatePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------- | ------ | -------- | :------: | ------- | ----------- | ------- | +| kubeConfig | string | `string` | | | | | + +### entity.ResourceGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------------- | ------------------- | :------: | ------- | ----------- | ------- | +| annotations | map of string | `map[string]string` | | | | | +| apiVersion | string | `string` | | | | | +| cluster | string | `string` | | | | | +| kind | string | `string` | | | | | +| labels | map of string | `map[string]string` | | | | | +| name | string | `string` | | | | | +| namespace | string | `string` | | | | | + +### insight.ResourceSummary + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| creationTimestamp | string | `string` | | | | | +| resource | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | +| resourceVersion | string | `string` | | | | | +| uid | string | `string` | | | | | + +### insight.ResourceTopology + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| children | []string | `[]string` | | | | | +| parents | []string | `[]string` | | | | | +| resourceGroup | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | + +### insight.ScoreData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------------------------------------------------------------------- | -------------- | ------------------ | :------: | ------- | ----------------------------------------------------------------------- | ------- | +| issuesTotal | integer | `int64` | | | IssuesTotal is the total count of all issues found during the audit. | | +| This count can be used to understand the overall number of problems | | | | | | | +| that need to be addressed. | | | | | | | +| resourceTotal | integer | `int64` | | | ResourceTotal is the count of unique resources audited during the scan. | | +| score | number | `float64` | | | Score represents the calculated score of the audited manifest based on | | +| the number and severity of issues. It provides a quantitative measure | | | | | | | +| of the security posture of the resources in the manifest. | | | | | | | +| severityStatistic | map of integer | `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective | | +| number of occurrences. It allows for a quick overview of the distribution | | | | | | | +| of issues across different severity categories. 
| | | | | | | + +### insight.Statistics + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------- | ------- | :------: | ------- | ----------- | ------- | +| clusterCount | integer | `int64` | | | | | +| resourceCount | integer | `int64` | | | | | +| resourceGroupRuleCount | integer | `int64` | | | | | + +### resourcegrouprule.ResourceGroupRulePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | -------- | ---------- | :------: | ------- | ----------- | ------- | +| description | string | `string` | | | | | +| fields | []string | `[]string` | | | | | +| name | string | `string` | | | | | + +### scanner.AuditData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | ------------------------------------------- | ---------------------- | :------: | ------- | ----------- | ------- | +| bySeverity | map of integer | `map[string]int64` | | | | | +| issueGroups | [][ScannerIssueGroup](#scanner-issue-group) | `[]*ScannerIssueGroup` | | | | | +| issueTotal | integer | `int64` | | | | | +| resourceTotal | integer | `int64` | | | | | + +### scanner.Issue + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------- | ------- | -------- | :------: | ------- | ------------------------------------------------------------------------------------- | ------- | +| message | string | `string` | | | Message provides a detailed human-readable description of the issue. | | +| scanner | string | `string` | | | Scanner is the name of the scanner that discovered the issue. | | +| severity | integer | `int64` | | | Severity indicates how critical the issue is, using the IssueSeverityLevel constants. | | +| title | string | `string` | | | Title is a brief summary of the issue. | | + +### scanner.IssueGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------------- | ----------------------------------------------- | ------------------------ | :------: | ------- | ----------- | ------- | +| issue | [ScannerIssue](#scanner-issue) | `ScannerIssue` | | | | | +| resourceGroups | [][EntityResourceGroup](#entity-resource-group) | `[]*EntityResourceGroup` | | | | | + +### unstructured.Unstructured + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------------------------- | ------------- | :------: | ------- | -------------------------------------------------------------------------------- | ------- | +| object | [interface{}](#interface) | `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or | | +| map[string]interface{} | | | | | | | +| children. | | | | | | | diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/3-search-methods.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/3-search-methods.md new file mode 100644 index 00000000..e1e63903 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/3-search-methods.md @@ -0,0 +1,109 @@ +--- +title: Search Methods +--- +Karpor is an open-source project that offers robust capabilities for searching resources across multiple clusters. 
This document outlines the two main search methods supported by Karpor: DSL (Domain Specific Language) and SQL (Structured Query Language), and explains how to utilize them for resource searches. + +## Keywords + +Karpor facilitates resource searches using two methods: DSL and SQL. Both methodologies leverage the following keywords for resource discovery: + +- cluster +- apiVersion +- kind +- namespace +- name +- creationTimestamp +- deletionTimestamp +- ownerReferences +- resourceVersion +- labels.`key` +- annotations.`key` +- content + +## SQL + +Karpor offers a SQL-like approach for querying Kubernetes resources, enabling users to employ SQL syntax for their searches. Below are examples illustrating the use of SQL syntax for various search scenarios: + +**Query resources of the Namespace kind** + +```sql +select * from resources where kind='Namespace' +``` + +**Query resources where the labels contain the key 'key1' with value 'value1'** + +```sql +select * from resources where labels.key1='value1' +``` + +**Query resources where the annotations contain the key 'key1' with value 'value1'** + +```sql +select * from resources where annotations.key1='value1' +``` + +**Query resources that are not of the Pod kind** + +```sql +select * from resources where kind!='Pod' +``` + +**Query resources of the Pod kind within a specific cluster** + +```sql +select * from resources where cluster='demo' and kind='Pod' +``` + +**Query resources of kind within a specified list** + +```sql +select * from resources where kind in ('pod','service') +``` + +**Query resources of kinds not within a specified list** + +```sql +select * from resources where kind not in ('pod','service') +``` + +**Query resources where the namespace starts with appl (where % represents any number of characters)** + +```sql +select * from resources where namespace like 'appl%' +``` + +**Query resources where the namespace contains banan (where \_ represents any single character)** + +```sql +select * from resources where namespace like 'banan_' +``` + +**Query resources where the namespace does not start with appl** + +```sql +select * from resources where namespace not like 'appl%' +``` + +**Query resources where the namespace does not contain banan** + +```sql +select * from resources where namespace notlike 'banan_' +``` + +**Query resources of kind Deployment and created before January 1, 2024, at 18:00:00** + +```sql +select * from resources where kind='Deployment' and creationTimestamp < '2024-01-01T18:00:00Z' +``` + +**Query resources of kind Service and order by creation timestamp in descending order** + +```sql +select * from resources where kind='Service' order by creationTimestamp desc +``` + +**Query resources whose content contains apple** + +```sql +select * from resources where contains(content, 'apple') +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/_category_.json new file mode 100644 index 00000000..1fd07096 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/5-references/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "References" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/6-roadmap/README.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/6-roadmap/README.md new file mode 100644 index 00000000..bd58b0bc --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/6-roadmap/README.md @@ -0,0 +1,18 @@ +--- +title: 路线图 +--- 
+Karpor 是一个新兴的开源项目,我们致力于将其打造成一个小而美/厂商中立/开发者友好/社区驱动的开源项目🚀。未来,我们将重点放在以下几个领域: + +- 提升 Karpor 的**可用性**,降低入门门槛,确保其足够“用户友好”。 +- 加强 Karpor 的**可靠性**,确保其在生产环境中可信赖。 +- 深化与更多社区工具的**生态系统整合**,以确保开放性。 +- 探索 **AI + Karpor**,创造更多可能性。 +- 拥抱开源社区:我们热爱**开源精神**,如果你对开源感兴趣,那么从这里开始! +- ...... + +Karpor 遵循 [发布流程与节奏指南](../4-developer-guide/2-conventions/1-release-process.md),但行动可能不会严格遵守路线图。我们可能会根据社区会议的反馈和 [GitHub 问题](https://github.com/KusionStack/karpor/issues) 调整里程碑,期望所有社区成员加入讨论。关于最终决策,请参考 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones)。 + +以下是详细的路线图,我们将持续更新 ⬇️ + +- **2024 路线图**: [https://github.com/KusionStack/karpor/issues/273](https://github.com/KusionStack/karpor/issues/273) + diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4.json new file mode 100644 index 00000000..d9639158 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4.json @@ -0,0 +1,46 @@ +{ + "version.label": { + "message": "v0.4", + "description": "The label for version v0.4" + }, + "sidebar.karpor.category.Getting Started": { + "message": "入门", + "description": "The label for category Getting Started in sidebar karpor" + }, + "sidebar.karpor.category.Concepts": { + "message": "概念", + "description": "The label for category Concepts in sidebar karpor" + }, + "sidebar.karpor.category.User Guide": { + "message": "用户手册", + "description": "The label for category User Guide in sidebar karpor" + }, + "sidebar.karpor.category.How to Insight": { + "message": "如何洞察", + "description": "The label for category How to Insight in sidebar karpor" + }, + "sidebar.karpor.category.Best Production Practices": { + "message": "生产最佳实践", + "description": "The label for category Best Production Practices in sidebar karpor" + }, + "sidebar.karpor.category.Developer Guide": { + "message": "开发者手册", + "description": "The label for category Developer Guide in sidebar karpor" + }, + "sidebar.karpor.category.Contribution Guide": { + "message": "贡献指南", + "description": "The label for category Contribution Guide in sidebar karpor" + }, + "sidebar.karpor.category.Conventions": { + "message": "规约", + "description": "The label for category Conventions in sidebar karpor" + }, + "sidebar.karpor.category.References": { + "message": "参考手册", + "description": "The label for category References in sidebar karpor" + }, + "sidebar.karpor.category.CLI Commands": { + "message": "CLI Commands", + "description": "The label for category CLI Commands in sidebar karpor" + } +} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/1-overview.mdx b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/1-overview.mdx new file mode 100644 index 00000000..f45330a8 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/1-overview.mdx @@ -0,0 +1,418 @@ +--- +id: overview +title: 概览 +slug: / +--- + +import { + AiOutlineArrowRight, + AiFillCheckCircle, + AiFillCloseCircle, +} from "react-icons/ai"; +import logoImg from "@site/static/karpor/assets/logo/logo-full.png"; +import searchImg from "@site/static/karpor/assets/overview/search.png"; +import insightImg from "@site/static/karpor/assets/overview/insight.png"; +import visionImg from "@site/static/karpor/assets/overview/vision.png"; +import comingSoonImg from "@site/static/karpor/assets/misc/coming-soon.jpeg"; +import KarporButton from "@site/src/components/KarporButton"; +import GithubStar from 
"@site/src/components/GithubStars"; +import ReactPlayer from "react-player"; +import Typed from "typed.js"; + +export const FeatureBlock = ({ + title, + reverse = false, + imgSrc, + imgAlt, + children, +}) => { + const isMobile = typeof window !== "undefined" && window.innerWidth <= 768; + return ( + <> +

{title}

+
+
+ {imgAlt} +
+
{children}
+
+ + ); +}; + +export const Content = () => { + const karporVsOthers = [ + { + label: "用户界面", + karpor: true, + kubernetesDashboard: true, + labelDesc: "", + }, + { + label: "多集群", + karpor: true, + kubernetesDashboard: false, + labelDesc: "能够同时连接到多个集群", + }, + { + label: "聚合资源视图", + karpor: true, + kubernetesDashboard: false, + labelDesc: "人类友好的资源视图", + }, + { + label: "安全合规", + karpor: true, + kubernetesDashboard: false, + labelDesc: "自动扫描风险,评估健康分", + }, + { + label: "资源关系拓扑", + karpor: true, + kubernetesDashboard: false, + labelDesc: "洞察资源的上下文关系", + }, + ]; + const h2Style = { + paddingBottom: "14px", + borderBottom: "2px solid #f1f1f1", + fontSize: 28, + }; + const flexDirectionStyle = { + display: "flex", + flexDirection: "column", + alignItems: "center", + }; + // Setup typed animation + const el = React.useRef(null); + React.useEffect(() => { + const typed = new Typed(el.current, { + strings: [ + "帮助开发者快速定位资源", + "帮助管理员深入洞察集群", + "帮助平台和多集群建立连接", + ], + typeSpeed: 40, + backDelay: 1500, + loop: true, + }); + return () => { + // Destroy Typed instance during cleanup to stop animation + typed.destroy(); + }; + }, []); + return ( + <> +
+
+ +
+
+
+ +
+
+ +
+
+
+ Intelligence for Kubernetes ✨ +
+
+ +
+
+
+
+
+

📖 Karpor 是什么?

+
+ Karpor 是智能化的 Kubernetes 平台,它为 Kubernetes + 带来了高级的 🔍 搜索、💡 洞察和 ✨ AI 功能,本质上是一个 + Kubernetes 可视化工具。通过 + Karpor,您可以在任何云平台上获得对 Kubernetes + 集群的关键可见性。 +
+
+ 我们立志成为一个 + + 小而美、厂商中立、开发者友好、社区驱动 + + 的开源项目! 🚀 +
+
+
+
+ +
+
+
+
+

💡 为什么选择 Karpor?

+
+ +
+ ⚡️ 自动同步 +
+ 自动同步您在多云平台管理的任何集群中的资源 +
+
+ 🔍 强大灵活的查询 +
+ 以快速简单的方式有效地检索和定位跨集群的资源 +
+
+
+ +
+ 🔒 安全合规 +
+ 了解您在多个集群和合规标准中的合规性状态 +
+
+ 📊 资源拓扑 +
+ 提供包含资源运行上下文信息的关系拓扑和逻辑视图 +
+
+ 📉 成本优化 +
+ 即将推出 +
+
+
+ +
+ 💬 自然语言操作 +
+ 使用自然语言与 Kubernetes 交互,实现更直观的操作 +
+
+ 📦 情境化 AI 响应 +
+ 获得智能的、情境化的辅助,满足您的需求 +
+
+ 🤖 Kubernetes AIOps +
+ 利用 AI 驱动的洞察,自动化和优化 Kubernetes 管理 +
+
+
+
+
+
+
+

🌈 Our Vision

+
+ 现如今,Kubernetes + 生态系统日益复杂是一个不可否认的趋势,这一趋势越来越难以驾驭。这种复杂性不仅增加了运维的难度,也降低了用户采纳新技术的速度,从而限制了他们充分利用 + Kubernetes 的潜力。 +
+
+ 我们希望 Karpor 围绕着 🔍 搜索、📊 洞察和✨AI,击穿 + Kubernetes 愈演愈烈的复杂性,达成以下价值主张: +
+
+
+ +
+
+
+

🙌 Karpor vs. Kubernetes Dashboard

+
+ + {karporVsOthers?.map((item) => { + return ( +
+
+
{item?.label}
+ {item?.labelDesc && ( +
{item?.labelDesc}
+ )} +
+
+ {item?.karpor ? ( + + ) : ( + + )} +
+
+ {item?.kubernetesDashboard ? ( + + ) : ( + + )} +
+
+ ); + })} +
+

🎖️ 项目贡献者

+
+

感谢这些了不起的人! 🍻

+

+ 查看{" "} + 贡献指南, + 欢迎加入我们! 👇 +

+ +
+

👉 下一步

+
+ +
+ + ); +}; + + + diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/2-installation.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/2-installation.md new file mode 100644 index 00000000..fd0fc451 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/2-installation.md @@ -0,0 +1,48 @@ +--- +title: 安装 +--- +## 前提条件 + +* 确保有一个可用的 Kubernetes 集群来安装 Karpor。对于本地安装,你可以使用 Minikube 或 Kind。 + +## 使用 Helm 安装 + +Karpor 可以通过 Helm v3.5+ 轻松安装,它是一个简单的命令行工具,你可以从 [这里](https://helm.sh/docs/intro/install/) 获取。 + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +![安装](./assets/2-installation/install.gif) + +## 使用 Helm 升级 + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update + +# 升级到最新版本 +helm upgrade karpor kusionstack/karpor + +# 升级到指定版本 +helm upgrade karpor kusionstack/karpor --version 1.2.3 +``` + +## 本地使用 Helm 安装/升级 + +如果你在生产环境中连接到 [https://kusionstack.github.io/charts/](https://kusionstack.github.io/charts/) 有问题,你可能需要从 [这里](https://github.com/KusionStack/charts) 手动下载 chart,并使用它来本地安装或升级。 + +```shell +git clone https://github.com/KusionStack/charts.git +helm install/upgrade karpor charts/karpor +``` + +## 卸载 + +执行以下命令卸载 karpor: + +```shell +helm uninstall karpor +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/3-quick-start.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/3-quick-start.md new file mode 100644 index 00000000..10210510 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/3-quick-start.md @@ -0,0 +1,105 @@ +--- +title: 快速开始 +--- +## 前提条件 + +* 确保已安装 [kubectl](https://kubernetes.io/docs/tasks/tools/)。 +* 确保已安装 [helm](https://helm.sh/docs/intro/install/)。 +* 如果你没有现成的集群,你仍然需要一个 [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation/)。 + +## 创建集群(可选) + +首先,如果你没有现成的集群,可以使用 `kind` 工具在本地环境中创建一个 Kubernetes 集群。按照以下步骤操作: + +1. 创建集群。你可以使用以下命令创建名为 `demo-cluster` 的集群: + ```shell + kind create cluster --name demo-cluster + ``` + + 这将在你的本地 Docker 环境中创建一个新的 Kubernetes 集群。稍等片刻,直到集群创建完成。 +2. 通过执行以下命令验证集群是否正常运行: + ```shell + kubectl cluster-info + ``` + + 如果一切设置正确,你将看到你的 Kubernetes 集群信息。 + +## 安装 + +要安装 Karpor,请在终端中执行以下命令: + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +更多的安装详情,请参考 [安装文档](2-installation.md)。 + +![安装](./assets/2-installation/install.gif) + +## 访问 Karpor Web 界面 + +1. 运行以下命令来访问运行在集群中的 Karpor 服务: + ```shell + kubectl -n karpor port-forward service/karpor-server 7443:7443 + ``` + + 执行这条命令后,如果你访问本地机器上的 7443 端口,流量会被转发到 Kubernetes 集群中 karpor-server 服务的 7443 端口。 +2. 打开浏览器并输入以下 URL: + ```shell + https://127.0.0.1:7443 + ``` + +这将打开 Karpor 的 Web 界面。👇 + +![在浏览器中打开](./assets/2-installation/open-in-browser.gif) + +祝贺你!🎉 你已成功安装 Karpor。现在你可以开始使用 Karpor 探索和洞察多集群中的资源。 + +## 注册集群 + +要向 Karpor 注册新集群,请按照以下步骤操作: + +1. 打开 Karpor Web 界面中的 集群管理 部分。 +2. 点击 接入集群 按钮。 +3. 按照界面上的说明完成集群注册过程。 + +以下是 `注册集群` 页面的示例: + +![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) + +有关注册过程的更详细解释,请参阅 [多集群管理](../3-user-guide/1-multi-cluster-management.md) 指南。 + +## 搜索资源 + +Karpor 提供了一个强大的搜索功能,允许你快速跨集群查找资源。要使用此功能: + +1. 打开 Karpor Web 界面中的 搜索 页面。 +2. 
输入你要查找的资源的搜索条件。 + +以下是 `搜索` 页面的示例: + +![](/karpor/assets/search/search-auto-complete.png) +![](/karpor/assets/search/search-result.png) + +要了解更多关于搜索功能以及如何有效使用它们的说明,请查看 [搜索方法](../5-references/3-search-methods.md) 指南。 + +## 资源洞察 + +通过点击搜索结果,你可以进入到资源的**洞察**页面,在这里你可以查看资源风险报告、健康分、资源关系拓扑图等经过我们提炼的信息。 + +以下是 `洞察` 页面的示例: + +![](/karpor/assets/insight/insight-home.png) +![](/karpor/assets/insight/insight-single-issue.png) +![](/karpor/assets/insight/insight-topology.png) + +## 结论 + +请注意,本指南仅提供 Karpor 的快速入门,你可能需要参考其他文档和资源来深入地了解每个功能。 + +## 下一步 + +- 了解 Karpor 的 [架构](../concepts/architecture) 和 [术语表](../concepts/glossary)。 +- 查看 [用户指南](../user-guide/multi-cluster-management) 以了解 Karpor 的更多功能。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/assets/2-installation/install.gif b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/assets/2-installation/install.gif new file mode 100644 index 00000000..68889793 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/assets/2-installation/install.gif differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/assets/2-installation/open-in-browser.gif b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/assets/2-installation/open-in-browser.gif new file mode 100644 index 00000000..00adfb18 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/1-getting-started/assets/2-installation/open-in-browser.gif differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/1-architecture.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/1-architecture.md new file mode 100644 index 00000000..018f5f3f --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/1-architecture.md @@ -0,0 +1,24 @@ +--- +title: 架构 +--- +![](assets/1-architecture/architecture.png) + +## 组件 + +- `Dashboard`:Karpor 的 Web UI 界面。 +- `Server`:Karpor 的核心后端服务。 +- `Syncer`:用于实时同步集群资源的独立服务。 +- `Storage`:用于存储已同步的资源和用户数据的存储后端。 + +## Karpor 的工作原理 + +1. 安装后,用户可以将感兴趣的集群注册到 Karpor 中。 +2. Syncer 组件会自动将已注册集群中的资源实时同步到 Storage 中,同时会确保资源的实时变化也会自动同步到 Storage 中。 +3. 当用户需要查找特定资源时,只需在 Dashboard 的搜索框中输入查询语句。Dashboard 会与 Server 的搜索接口交互,Server 内的搜索模块将解析这些语句,并在 Storage 中查找相应的资源,然后将搜索结果返回给 Dashboard。 +4. 点击搜索结果后,用户将被引导至资源洞察页面。Dashboard 调用 Server 的洞察接口,其中 Server 的洞察模块对资源进行静态扫描,生成问题报告,并定位其相关资源,以绘制包含所有父资源和子资源的资源拓扑图。 +5. 
洞察页面同样适用于资源组,比如洞察特定 Group-Version-Kind 的资源组、单个命名空间,或是用户自定义的资源组。 + +## 下一步 + +- 学习 Karpor 的 [术语表](../concepts/glossary)。 +- 查看 [用户指南](../user-guide/multi-cluster-management) 以了解更多关于你能够通过 Karpor 实现的内容。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/3-glossary.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/3-glossary.md new file mode 100644 index 00000000..c76d8303 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/3-glossary.md @@ -0,0 +1,48 @@ +--- +title: 术语表 +--- +## 集群 + +等同于 `Kubernetes` 中的集群概念,例如名为 `democluster` 的集群。 + +`Karpor` 可以管理多个集群,包括集群注册、证书轮换、生成和查看洞察,以及通过 Dashboard 进行的其他操作。它还支持使用 `Karpor` 发放的统一证书,通过 `kubectl` 和 `kubectx` 等命令行工具访问任何被管理的集群。 + +更多细节,请参考最佳实践:[告别集群证书切换,让你“一卡通行”](../3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md)。 + +## 资源 + +等同于 `Kubernetes` 中的资源概念,如名为 `mockDeployment` 的 `Deployment`。 + +`Karpor` 对其管理集群中的资源进行实时同步、搜索和洞察。资源是 `Karpor` 里搜索和洞察的最小粒度对象。 + +## 资源组 + +**资源组是一种逻辑上的组织结构**,用于将相关的 `Kubernetes` 资源组合起来,以便于更直观的查看、搜索和洞察。例如,可以创建一个名为 `mockapp` 的 `Application` 资源组,其中包括一个 `Namespace`、一个 `Deployment` 和多个具有特定标签(如 `app.kubernetes.io/name: mockapp`)的 `Pods`。 + +## 资源组规则 + +**资源组规则是一套规则**,将特定资源分组到适当的资源组中。这些规则旨在基于 `annotations`、`labels`、`namespace` 等属性,将资源组织成逻辑单元。例如,要定义一个应用程序资源组规则,可以指定 `annotations` 为 `app.kubernetes.io/name` 作为分组条件。 +`Karpor` 预设了一个资源组规则 `Namespace` 以及自定义资源组规则。 + +![](assets/3-glossary/image-20240326171327110.png) + +## 拓扑 + +在 `Karpor` 中,拓扑是指**给定资源组内相关资源之间的关系和依赖**。利用可视化的拓扑图可以更容易地查看和理解资源组的内部结构,这对于故障排查和定位问题很有帮助。 + +## 审计 + +审计是指**对给定资源组内的所有资源执行合规性扫描**。其目的是帮助用户发现潜在风险。当前系统内置使用的扫描工具和规则,但我们将来会支持自定义方式进行扫描。 + +## 问题 + +**审计的输出被称为问题**。如果被扫描对象没有问题,则审计结果将为空。否则,所有识别到的风险将根据其风险等级进行分类并显示,包括每个风险的描述、相关资源等,用来指导用户解决问题,确保集群资源的安全和合规。 + +## 健康分 + +评分用于反映资源组或资源的**整体健康状况**,提醒用户及时调整和采取措施。健康评分是基于资源组的审计结果计算得出。影响评分的因素包括:**风险等级**、**风险数量**和**资源总数**。 + +## 下一步 + +- 学习 Karpor 的 [架构](../concepts/architecture)。 +- 查看 [用户指南](../user-guide/multi-cluster-management),了解更多有关你可以通过 Karpor 实现的内容。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/assets/1-architecture/architecture.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/assets/1-architecture/architecture.png new file mode 100644 index 00000000..afec9346 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/assets/1-architecture/architecture.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/assets/3-glossary/image-20240326171327110.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/assets/3-glossary/image-20240326171327110.png new file mode 100644 index 00000000..f5673eb8 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/2-concepts/assets/3-glossary/image-20240326171327110.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/1-multi-cluster-management.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/1-multi-cluster-management.md new file mode 100644 index 
00000000..76a54f2b --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/1-multi-cluster-management.md @@ -0,0 +1,34 @@ +--- +title: 多集群管理 +--- +多集群管理是将集群注册进 Karpor 的入口,使能在大量集群间进行搜索和洞察。 + +## 注册集群 + +1. 点击 集群管理 标签页。 +2. 点击 注册集群 按钮。 + ![](/karpor/assets/cluster-mng/cluster-mng-empty.png) +3. 添加集群名字。集群名称必须唯一且一旦创建不能更改。 +4. 上传该集群的 KubeConfig 文件(一个具有读权限的文件就足够了)。 +5. 点击 验证并提交 按钮。 + ![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) +6. 一旦验证通过,集群将会被添加到 集群管理 页面。 + ![](/karpor/assets/cluster-mng/cluster-mng-register-success.png) + +## 编辑集群 + +编辑 按钮允许修改 显示名称描述,从而改变仪表盘中集群名称和描述的显示方式。 + +![](/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png) + +## 轮换证书 + +当 KubeConfig 过期时,你可以通过点击 轮换证书 来更新证书。 +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png) + +## 移除集群 + +通过 删除 按钮方便地移除已注册的集群。 +![](/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/2-search.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/2-search.md new file mode 100644 index 00000000..55828ac6 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/2-search.md @@ -0,0 +1,34 @@ +--- +title: 如何搜索 +--- +在本节中,我们将探索如何使用 Karpor 执行多集群资源搜索,本指南完全通过 Dashboard 进行。 + +我们支持三种搜索方法: + +- **通过 SQL 搜索**:使用 SQL 查询语言执行资源搜索。 +- **通过 DSL 搜索**:通过 `Karpor` 的特定领域语言(DSL)进行资源搜索。 +- **通过自然语言搜索**:使用自然语言进行资源搜索。 + +## 通过 SQL 搜索 + +Karpor 提供了一个方便的 SQL 查询功能,允许你使用熟悉的 SQL 语法搜索和过滤所有托管集群中的 Kubernetes 资源,并为多集群资源搜索提供了针对性的优化和增强。 + +SQL 是软件工程行业从业者容易获取的技能之一,理论上使得学习曲线相当低。因此,这种搜索方法是为你准备的!特别适合 Karpor 的初学者。 + +以下是使用 SQL 搜索的步骤: + +1. **进入搜索页面**:我们将首页设计为搜索的入口点,因此打开 `Karpor` 的 Web UI 立即呈现给你搜索页面。 + ![](/karpor/assets/search/search-home.png) +2. **编写 SQL 查询语句**:使用 SQL 语法编写你的查询语句,指定你希望搜索的集群名称、资源类型、条件和过滤器。此外,如果你输入关键词并按空格,搜索框将弹出带有下拉菜单的自动完成提示,建议你可以输入的下一个可能的关键词。 + ![](/karpor/assets/search/search-auto-complete.png) +3. **执行查询**:点击 `搜索` 按钮执行查询,并被发送到搜索结果页面。Karpor 将返回与 SQL 查询匹配的资源列表。 + ![](/karpor/assets/search/search-result.png) +4. **高级功能**:利用我们的内置高级 SQL 语法,如排序、全文搜索等,进一步细化你的搜索。详情请参阅:[搜索方法文档](../5-references/3-search-methods.md)。 + +## 通过 DSL 搜索 + +敬请期待。🚧 + +## 通过自然语言搜索 + +敬请期待。🚧 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/1-inspecting-any-resource-group-and-resource.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/1-inspecting-any-resource-group-and-resource.md new file mode 100644 index 00000000..3ac1df52 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/1-inspecting-any-resource-group-and-resource.md @@ -0,0 +1,27 @@ +--- +title: 检查任何资源组和资源 +--- +在这部分内容中,我们将通过清晰的步骤和实例详细解释如何使用 Karpor 来检查任何资源组或资源。 + +如果你不熟悉相关概念,可以参考 [术语表](../../2-concepts/3-glossary.md) 章节。 + +## 检查具体资源 + +1. 搜索你感兴趣的资源: + ![](/karpor/assets/search/search-home.png) +2. 在搜索结果页,所有通过条件筛选的资源将会被列出: + ![](/karpor/assets/search/search-result.png) +3. 
点击任意资源名称,即可跳转到该资源的洞察页面: + ![](/karpor/assets/insight/insight-home.png) + +## 检查具体资源组 + +你可能已经注意到,在每一个搜索结果条目中,资源的 `Cluster`、`Kind`、`Namespace` 等标签都列了出来。请注意,这些标签是**超链接**,我们称之为 "**锚点**"。它们代表了指向特定资源组或资源的链接。通过点击这些**锚点**,你可以快速跳转到该资源组或资源的洞察页面。 + +![](/karpor/assets/search/search-result.png) + +## 在资源组 / 资源间灵活切换 + +实际上,除了前述搜索结果中的标签外,在任何页面上看到的任何资源 / 资源组名称,都可以作为**锚点**重定向,就像是时空虫洞,允许你在任何维度之间来回穿梭,直到找到你正在搜索的资源。搜索和锚点都是加速检索的手段,它们是 Karpor 作为 Kubernetes 探索器的关键特性。 + +![](/karpor/assets/insight/insight-breadcrumbs.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/2-custom-resource-group.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/2-custom-resource-group.md new file mode 100644 index 00000000..e009d79d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/2-custom-resource-group.md @@ -0,0 +1,92 @@ +--- +title: 自定义资源组 +--- +## 创建自定义资源组 + +本节将重点介绍如何在 Karpor 中创建自定义资源组。通过自定义资源组,你可以根据自己的需求和逻辑概念,在 Karpor 中灵活管理和组织资源。我们将逐步指导你创建和定义自定义资源组,并展示如何使用这些组进行资源洞察和管理。 + +如果你不熟悉**资源组**和**资源组规则**相关概念,可以参考 [词汇表](../../2-concepts/3-glossary.md) 部分。 + +**假设**在你的组织或公司内,有一个 `应用单元` 的概念,代表**某个环境中应用的所有资源**。 + +我们在**标签中标记应用的名称和环境**。例如,以下是 `生产环境` 中 `mock-apple` 的 `应用单元`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: mock-apple + name: mock-apple +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + template: + metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + fruit: apple + spec: + containers: + - image: nginx:latest + name: mock-container + dnsPolicy: ClusterFirst + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + name: mock-service-apple-prod + namespace: mock-apple +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + type: ClusterIP +``` + +现在,我们将按照以下步骤创建一个名为 `应用单元` 的自定义 `资源组规则`。它将根据用户指定的规则对集群中的所有资源进行分类,并列出所有符合规则的 `资源组`。 + +1. 点击 洞察 标签进入洞察首页。 +2. 在页面底部,你将看到一个默认的资源组规则 `命名空间`,这是按命名空间分类的单一规则。 + ![](/karpor/assets/insight/insight-homepage.png) +3. 点击创建资源组按钮 +,并在弹出窗口中填入 `应用单元` 的**基本信息和分类规则**。 + ![](/karpor/assets/insight/insight-create-app-resource-group-rule.png) +4. 点击 提交 按钮,然后点击新出现的 应用单元 标签,列出所有应用单元。 + ![](/karpor/assets/insight/insight-list-app-resource-groups.png) +5. 你可以在搜索框中输入关键词,快速找到 `生产` 环境中的 `mock-apple` 应用单元。 + ![](/karpor/assets/insight/insight-search-app-resource-group.png) +6. 你可以点击资源组卡片上的 查看 按钮,跳转到相应的 `资源组洞察页面`,查看某个应用单元的所有资源、拓扑关系、合规报告等聚合信息。 +7. 
如有需要,你也可以使用相同的步骤创建 `环境资源组`。 + ![](/karpor/assets/insight/insight-create-env-resource-group-rule.png) + ![](/karpor/assets/insight/insight-list-env-resource-groups.png) + +## 编辑自定义资源组 + +你可以点击自定义资源组选项卡右侧的按钮 来修改弹出窗口中的基本信息和分类规则。 + +![](/karpor/assets/insight/insight-edit-env-resource-group.png) + +## 删除自定义资源组 + +你可以点击自定义资源组标签右侧的按钮 然后在弹出窗口中点击 删除,以删除当前资源组规则。 + +![](/karpor/assets/insight/insight-delete-env-resource-group.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/3-summary.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/3-summary.md new file mode 100644 index 00000000..639426ff --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/3-summary.md @@ -0,0 +1,23 @@ +--- +title: 概览 +--- +在本节中,我们将了解 Karpor 洞察页面上的 `概览卡片`,它们用于快速查看和理解当前资源组或资源的关键指标。 + +在不同的资源组下,`概览卡片` 显示的内容也可能有所不同。 + +如果你查看的是: + +1. **资源组洞察页面**: + + 1. **集群洞察页面**,概览卡片显示的是集群的**节点、Pod 数量、CPU、内存容量以及 Kubernetes 版本**。 + ![](/karpor/assets/insight/insight-summary-cluster.png) + 2. **资源种类洞察页面**,概览卡片显示的是**所属集群、GVK(Group Version Kind)信息,以及当前集群下该类型资源的数量**。 + ![](/karpor/assets/insight/insight-summary-kind.png) + 3. **命名空间洞察页面**,概览卡片显示的是**所属集群、命名空间,以及当前命名空间下最丰富的资源类型**。 + ![](/karpor/assets/insight/insight-summary-namespace.png) + 4. **自定义资源组洞察页面**,概览卡片显示的是**每个规则的关键值,以及当前资源组下的几个资源统计数据**。 + ![](/karpor/assets/insight/insight-summary-custom-resource-group.png) +2. **资源洞察页面**,概览卡片显示的是**当前资源的名称、GVK 信息、所属集群和命名空间**。 + ![](/karpor/assets/insight/insight-summary-resource.png) + +⚠️ **注意**:无论你处于哪个资源组洞察页面,概览卡片总会显示一个健康评分,该评分基于实体的风险合规状态计算得出。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/4-compliance-report.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/4-compliance-report.md new file mode 100644 index 00000000..68b60d87 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/4-compliance-report.md @@ -0,0 +1,16 @@ +--- +title: 合规报告 +--- +本节将介绍合规扫描功能,主要用于检测和评估当前资源或资源组中的所有资源是否符合特定的合规标准和安全政策。在本节中,你将了解如何有效利用合规扫描功能以确保集群和资源的安全与合规。 + +如果你不熟悉**合规报告**或**风险**相关概念,你可以参考 [术语表](../../2-concepts/3-glossary.md) 章节。 + +1. 按照 [检查任何资源组和资源](#%E6%A3%80%E6%9F%A5%E4%BB%BB%E4%BD%95%E8%B5%84%E6%BA%90%E7%BB%84%E5%92%8C%E8%B5%84%E6%BA%90) 的指引,导航至特定资源组 / 资源的洞察页面。 +2. 你可以看到资源的**合规报告**卡片。 + ![](/karpor/assets/insight/insight-home.png) +3. 该卡片显示了对当前资源或资源组下所有资源进行扫描时识别出的**风险**,按风险等级分类。在每个风险等级标签下,风险按发生频率从高到低排序。每个风险条目显示标题、描述、发生次数以及发现问题的扫描工具。 +4. 点击特定风险将显示一个弹出窗口,展示风险的详细信息。 + ![](/karpor/assets/insight/insight-single-issue.png) +5. 点击 查看所有风险 ,将弹出一个抽屉,列出所有风险。这里,你可以搜索、分类、分页等。 + ![](/karpor/assets/insight/insight-all-issues.png) +6. 一旦你按照其指示解决了一个风险,可以点击 重新扫描 按钮,这将触发对资源组下所有资源进行全面的合规扫描。一旦扫描完成,仪表板将显示新的结果。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/5-topology.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/5-topology.md new file mode 100644 index 00000000..1bbaedd5 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/5-topology.md @@ -0,0 +1,19 @@ +--- +title: 拓扑结构 +--- +## 拓扑结构 + +在本节中,我们将探索 Karpor 中的拓扑功能。拓扑视图将帮助你更直观地理解集群中各种资源之间的关系和依赖。以下是如何使用拓扑视图。 + +1. 按照 [检查任意资源组和资源](#%E6%A3%80%E6%9F%A5%E4%BB%BB%E4%BD%95%E8%B5%84%E6%BA%90%E7%BB%84%E5%92%8C%E8%B5%84%E6%BA%90) 的指引,导航至特定资源组 / 资源的洞察页面。 +2. 在页面底部,你可以看到资源拓扑图。 + ![](/karpor/assets/insight/insight-topology.png) +3. 根据当前页面情况: + 1. 资源洞察页面: + 1. 
该图将展示与当前资源相关的上游和下游资源。例如,如果当前资源是一个 Deployment(部署),拓扑图将展示 Deployment 下的 ReplicaSet(副本集)以及 ReplicaSet 下的 Pods(容器组)。 + ![](/karpor/assets/insight/insight-topology-example.png) + 2. 点击资源拓扑图中的一个节点,等同于点击特定资源的锚点,这将直接导航至该资源的洞察页面。 + 2. 资源组洞察页面: + 1. 该图将直观显示当前资源组下各种资源类型的数量与关系。 + 2. 点击资源拓扑图中的一个节点,等同于点击资源类型,下方列表将刷新显示当前资源组中特定类型下的所有资源。 + ![](/karpor/assets/insight/insight-linkage.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/_category_.json new file mode 100644 index 00000000..c39e5397 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "How to Insight" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/index.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/index.md new file mode 100644 index 00000000..9cec8507 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/3-insight/index.md @@ -0,0 +1,6 @@ +--- +title: 如何洞察 +--- +在本节中,我们将介绍如何使用 Karpor 对集群内的资源进行全面洞察。你可以通过多种方式访问洞察页面,并且可以轻松地在不同范围(如集群、类型、命名空间或单个资源)的洞察页面之间切换。如果你当前组织内有特定领域的逻辑范围,你甚至可以通过设置资源组规则来自定义资源组(如应用程序、环境等)。我们还提供功能以便对这些自定义资源组进行洞察。 + +本指南将完全在 Karpor 仪表板上操作。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md new file mode 100644 index 00000000..44f92637 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md @@ -0,0 +1,42 @@ +--- +title: 告别集群证书切换,让你“一卡通行” +--- +## 挑战与需求 + +### 大规模多集群的规模挑战 + +2014 年 6 月,Kubernetes 从 Google 的内部 Borg 项目诞生,引人注目地亮相。在科技巨头的支持和一个蓬勃发展的开源社区的帮助下,它逐渐成为了容器编排领域的事实标准。随着公司开始在生产环境中部署 Kubernetes,单个 Kubernetes 集群无法再满足内部日益复杂的需求。单个集群中的节点数量超过社区推荐的限制(5,000)是很常见的,使得扩展到多集群成为一个自然的选择。 + +### 多集群访问者的基本需求 + +随着多集群的蓬勃发展,各种平台可能需要跨不同集群访问资源,需要获取每个集群的 KubeConfig。 + +随着用户和集群数量的增加,集群管理员面临着巨大的时间成本:如果有 `M` 个集群和 `N` 个用户,管理 KubeConfig 的时间复杂度将变为 `O (M*N)`。此外,用户在访问不同集群时需要切换不同的 KubeConfig,不同集群的 KubeConfig 对应的权限也各不相同,无疑增加了使用的复杂度。 + +![直接连接:用户需要维护多个 KubeConfigs](assets/1-one-pass-with-proxy/image-20240326163622363.png) + +在这种情况下,有没有一种方法能方便地访问不同集群中的资源,而无需维护大量的 KubeConfig 和管理跨集群的各种用户权限问题?此外,这种方法理想地应该是云原生的,可以通过 kubectl 和 Kubernetes 官方客户端访问,以降低过渡到使用这种方法的成本。`Karpor` 的出现就是为了解决这些问题。 + +## "一站式通行" 的理念 + +我们开发了 `Karpor`,一个开源项目。作为一个 Kubernetes 资源探索器,在搜索和洞察集群资源方面具有独特优势,它的基础多集群管理组件,具备集群证书颁发和多集群请求代理的特点,使其高度适合作为平台对多个集群的统一访问入口。该组件支持以云原生方式转发用户请求到指定集群,允许用户维护一套 KubeConfig 来访问不同的集群,使访问多集群像访问单个集群一样简单。那么,它是如何工作的呢?下面,我们介绍 `Karpor` 的架构和功能。 + +![使用多集群网关:用户只需要维护一套 KubeConfigs](assets/1-one-pass-with-proxy/image-20240326164141400.png) + +### 多集群请求路由和代理 + +`Karpor` 包含一个应用层网关,能够将任何 Kubernetes 风格的请求转发给指定的 Kubernetes 集群。`Karpor` 也是基于 Kubernetes 框架开发的,作为 kube-apiserver,可以独立运行或作为现有 kube-apiserver 的扩展。`Karpor` 支持处理两种类型的扩展资源:`Cluster` 和 `Cluster/Proxy`,前者用于存储集群信息,后者用于将用户请求转发到特定集群。用户可以通过 Kubernetes 官方 CLI(`kubectl`)或 SDK(`client-go`、`client-java` 等)进行访问。 + +`Karpor` 将所有对 `Cluster/Proxy` 子资源的访问代理到目标集群。例如,要从 `Cluster1` 集群检索 Pod 信息,用户需要向 `Karpor` 发送 `GET /apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods` 请求。`Karpor` 将从 `Cluster/Cluster1` 资源生成一个 KubeConfig 以访问该集群,并将 `/api/v1/pods` 请求代理到 `Cluster1` 集群。 + +![使用 kubectl 和 karpor 
证书访问任何管理的集群](assets/1-one-pass-with-proxy/image-20240326165247891.png) + +### 支持所有 Kubernetes 原生请求 + +`Karpor` 支持转发所有 kube-apiserver 请求。具体来说,`Karpor` 是一个应用层网关,通过 HTTP connect 协议代理 HTTP 请求。除了支持对资源的 `get`、`create`、`update` 和 `delete` 操作外,它还支持 `watch`、`log`、`exec`、`attach` 等。(由于用于 `exec` 和 `attach` 的 SPDY 协议不支持 http2,`Karpor` 在转发这些请求时会禁用 http2,切换到 http1.1 并支持 hijacker 处理)。 + +![](assets/1-one-pass-with-proxy/image-20240326165632158.png) + +## 总结 + +从上文中可以看出,利用 `Karpor` 的多集群管理组件,为用户提供了一个可控权限的 “多集群通行证”。用户不再需要关心频繁切换集群证书和新集群的接入等问题。有了这个“一证通行”,访问多个集群的成本降低了,满足了大多数用户在多集群平台上的最基本需求。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/_category_.json new file mode 100644 index 00000000..82dd90e3 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Best Production Practices" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png new file mode 100644 index 00000000..ab8051fe Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png new file mode 100644 index 00000000..de950079 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png new file mode 100644 index 00000000..27fffb47 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png new file mode 100644 index 00000000..99053c68 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/_category_.json new file mode 100644 
index 00000000..8f01ba26 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/3-user-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/1-non-code-contribute.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/1-non-code-contribute.md new file mode 100644 index 00000000..0c723628 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/1-non-code-contribute.md @@ -0,0 +1,40 @@ +--- +title: 非代码贡献指南 +--- +你可以用以下任何感兴趣的方式贡献。 + +## 贡献用户示例和 Demo + +* 如果你正在使用 Karpor,最简单的贡献方式就是 [向社区表达感谢](https://github.com/KusionStack/karpor/issues/343)。 + +## 报告漏洞 + +在提交新的 issue 之前,请确保该问题没有被提交过。 + +检查 [Issue 列表](https://github.com/KusionStack/karpor/issues) 是否有类似 issue。 + +通过 [报告漏洞](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug-report.yaml) 提交漏洞报告,确保你提供了足够的信息帮助复现该漏洞。 + +遵循下面的 issue 模板并且添加额外信息来帮助我们复现该问题。 + +## 安全性 issue + +如果你确信发现了安全漏洞,请阅读我们的 [安全策略](https://github.com/KusionStack/karpor/blob/main/SECURITY.md) 获取更多细节。 + +## 提议增强特性 + +如果你有提升 Karpor 的好点子,请提交 [特性请求](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Ffeature&projects=&template=enhancement.yaml)。 + +## 回答问题 + +如果你有疑问并且在 [文档](https://www.kusionstack.io/karpor/) 中找不到答案,下一步是在 [GitHub 论坛](https://github.com/KusionStack/karpor/discussions) 中提问。 + +帮助这些用户对我们很重要,我们很需要你的帮助。你可以通过回答 [他们的问题](https://github.com/KusionStack/karpor/discussions) 来帮助其他的 Karpor 用户。 + +## 贡献文档 + +贡献文档需要一些提交 pull request 到 Github 的知识,按照下面的指南这将会非常简单。 + +* [kusionstack.io 开发者指南](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +查看 [开源指南](https://opensource.guide/how-to-contribute/) 获取更多贡献方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/2-code-contribute.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/2-code-contribute.md new file mode 100644 index 00000000..018f8ec4 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/2-code-contribute.md @@ -0,0 +1,174 @@ +--- +title: 代码贡献指南 +--- +在本代码贡献指南,你将会了解下列内容: + +- [如何在本地运行 Karpor](#%E5%A6%82%E4%BD%95%E5%9C%A8%E6%9C%AC%E5%9C%B0%E8%BF%90%E8%A1%8C-karpor) +- [如何创建拉取请求(pull request)](#%E5%88%9B%E5%BB%BA%E6%8B%89%E5%8F%96%E8%AF%B7%E6%B1%82pull-request) +- [代码审查指导规则](#%E4%BB%A3%E7%A0%81%E5%AE%A1%E6%9F%A5) +- [Pull request 格式指南](#pull-request-%E6%A0%BC%E5%BC%8F%E6%8C%87%E5%8D%97) +- [更新文档和网站](#%E6%9B%B4%E6%96%B0%E6%96%87%E6%A1%A3%E5%92%8C%E7%BD%91%E7%AB%99) + +## 如何在本地运行 Karpor + +本指南将会帮助你开始 Karpor 开发。 + +### 前提条件 + +* Golang 版本 1.19+ + +
+ 安装 Golang + +1. 从 [官方网站](https://go.dev/dl/) 安装 golang 1.19+。解压二进制文件并放置到某个位置,假设该位置是 home 目录下的 `~/go/`,下面是一个示例命令,你应当选择适合你系统的正确二进制文件。 + +``` +wget https://go.dev/dl/go1.20.2.linux-amd64.tar.gz +tar xzf go1.20.2.linux-amd64.tar.gz +``` + +如果你想在本地开发环境维护多个 golang 版本,你可以下载包并解压到某个位置,比如 `~/go/go1.19.1`,然后根据下面的命令相应地改变路径。 + +1. 为 Golang 设置环境变量 + +``` +export PATH=~/go/bin/:$PATH +export GOROOT=~/go/ +export GOPATH=~/gopath/ +``` + +如果 `gopath` 目录不存在,可以使用 `mkdir ~/gopath` 创建。这些命令将会把 go 二进制文件所在的目录添加到 `PATH` 环境变量 (让其成为 go 命令的优先选择)并且设置 `GOROOT` 环境到该 go 目录。如果将这些行添加到你的 `~/.bashrc` or `~/.zshrc` 文件,你就不用每次打开新的终端时设置这些环境变量。 + +1. (可选) 在一些地区,例如中国大陆,连接到默认的 go 仓库可能会很慢;你可以配置 GOPROXY 来加速下载过程。 + +``` +go env -w GOPROXY=https://goproxy.cn,direct +``` + +
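
完成上述 PATH、GOROOT、GOPATH 以及可选的 GOPROXY 配置后,可以用类似下面的命令做一次快速自检(仅为示意,具体输出取决于你本地安装的 Go 版本和路径):

```shell
# 示意:确认 go 可执行文件与环境变量已按上文设置生效
go version
go env GOROOT GOPATH GOPROXY
```
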
+ +* Kubernetes 版本 v1.20+ ,且配置文件保存在 `~/.kube/config`。 +* golangci-lint 版本 v1.52.2+, 通过运行 `make lint` 可以自动安装,如果自动安装失败,你可以手动安装。 + +
+ 手动安装 golangci-lint + +你可以根据 [安装指南](https://golangci-lint.run/welcome/install)手动安装,或者使用以下命令: + +``` +cd ~/go/ && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.52.2 +``` + +
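
安装完成后,可以运行下面的命令确认 golangci-lint 已经可用(示意命令,假设按上文已将 `~/go/bin` 加入 PATH):

```shell
# 示意:确认 golangci-lint 安装成功并查看版本
golangci-lint --version
```
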
+ +### 构建 + +- 克隆项目 + +```shell +git clone git@github.com:KusionStack/karpor.git +``` + +- 本地构建 + +执行 `make build-all` 将会为所有平台创建可执行文件;如果你只想为特定平台构建,执行 `make build-${PlatformName}`,例如 `make build-darwin`。查看所有可用命令,执行 `make help`。 + +### 测试 + +为了保证代码质量,编写测试代码是必不可少的,你可以在项目根目录运行以下命令执行单元测试: + +```shell +make test +``` + +如果你需要生成额外的覆盖率报告,执行: + +```shell +make cover +``` + +接下来你可以执行下列命令,来从浏览器中阅读测试覆盖率报告: + +```shell +make cover-html +``` + +## 创建拉取请求(Pull Request) + +我们很高兴你考虑为 Karpor 项目作出贡献! + +本文档将会指导你完成 [创建拉取请求](./index.md#contribute-a-pull-request) 的过程。 + +### 在你开始之前 + +我们知道你对于创建第一个 pull request 非常兴奋。在我们开始之前,请确保你的代码符合相关的 [代码规约](../2-conventions/2-code-conventions.md)。 + +### 你的第一个 Pull Request + +在提交你的 PR 之前,运行下面的命令并确保它们都成功了: + +``` +make test +make lint +``` + +如果这是你第一次向 Github 上的开源项目贡献,请确保你已经阅读了 [创建拉取请求](https://docs.github.com/zh/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request)。 + +为了提高你的 pull request 被接受的机会,请确保你的 pull rquest 符合以下指导规则: + +- 标题和描述与实现相符。 +- pull request 中的所有 commit 都符合 [格式指南](#Formatting-guidelines)。 +- pull request 会关闭一个相关 issue。 +- pull request 包含了必要的测试,以验证预期行为。 +- 如果你的 pull request 有冲突,请将你的分支 rebase 到 main 分支。 + +如果 pull request 修复了一个漏洞: + +- pull request 的描述中必须包含 `Closes #` 或 `Fixes #`。 +- 为了避免回归问题,pull request 必须包含验证该漏洞被修复的测试。 + +## 代码审查 + +一旦你创建了一个 pull requset,下一步就是让其他人审查你的改动。代码审查对审查者和 pull request 作者都是很好的学习机会。 + +如果你觉得某个特定的人应当审查你的 pull request,你可以在描述或评论中标记他们。 +通过输入 `@` 符号和其用户名来标记用户。 + +我们建议你阅读 [如何进行代码审查](https://google.github.io/eng-practices/review/reviewer/) 来了解更多关于代码审查的知识。 + +## Pull request 格式指南 + +精心编写的 pull request 可以最大程度地缩短你的更改被接受的时间。这些指南将帮助你为 pull requset 编写条理清晰的提交消息和说明。 + +### Commit 信息格式 + +了解更多:[Commit 规约](../2-conventions/4-commit-conventions.md) + +### Pull Request 标题 + +在接受 pull request 时,Karpor 团队会将所有的 commit 合并为一个。 + +Pull request 的标题将会成为合并后的 commit 信息的描述。 + +我们仍然鼓励贡献者撰写详细的 commit 信息,因为它们将会作为 git commit 正文的一部分。 + +我们在生成发布更新日志时将会使用 pull request 的标题。因此,我们会努力使标题尽可能具有信息量。 + +确保你的 pull request 标题使用与 commit 信息相同的格式。如果不遵循该格式,我们将会在该 pull request 添加 `title-needs-formatting` 标签。 + +### 通过所有 CI 检查 + +在合并之前,所有的测试 CI 都应该通过: + +- 覆盖率不应该下降。当前,pull request 的覆盖率应当至少为 70%。 +- Karpor 使用 **CLA** 作为贡献者协议。它要求你在第一次合并 pull request 之前签署。 + +## 更新文档和网站 + +如果你的 pull request 被合并了,而且它引入了新的特性或增强,你需要更新文档并且提交 pull requset 到 [kusionstack.io](https://github.com/KusionStack/kusionstack.io) 仓库。 + +根据下面的文档了解如何编写文档: + +- [kusionstack.io 开发者指南](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +太棒了,你已经完成了代码贡献的整个生命周期! diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/3-roles.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/3-roles.md new file mode 100644 index 00000000..206d98ea --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/3-roles.md @@ -0,0 +1,36 @@ +--- +title: 角色 +--- +感谢您对本开源项目的关注和支持!本文档将阐述贡献者在项目中的角色、职责以及如何从 Contributor 升级为 Maintainer,以及 Maintainer 降级为 Contributor 的规则。我们希望通过这份文档,让每位贡献者都能清楚地了解自己的成长路径,并为项目的发展做出更大的贡献。 + +## 贡献者角色及职责 + +在本开源项目中,我们主要设有两个贡献者角色:Contributor 和 Maintainer。 +以下是对这两个角色的简要介绍: + +1. Contributor:项目的贡献者,可以是代码贡献者、文档贡献者、测试贡献者等。Contributor 为项目提供了宝贵的资源,帮助项目不断完善和发展。 +2. 
Maintainer:项目的维护者,负责项目的日常维护工作,包括审查和合并 PR、处理 Issue、发布版本等。Maintainer 是项目的核心成员,对项目的发展方向和决策具有重要的影响力。 + +## Contributor 升级为 Maintainer + +我们非常欢迎每位 Contributor 为项目的发展做出贡献,并鼓励 Contributor 向 Maintainer 的角色发展。 +以下是从 Contributor 升级为 Maintainer 的条件: + +1. 持续贡献:Contributor 需要在一段时间内(例如 3 个月)持续为项目贡献代码、文档或其他资源。这表明 Contributor 对项目的关注度和热情。 +2. 质量保证:Contributor 提交的代码或文档等资源需要保持较高的质量,符合项目的规范要求,并对项目产生积极的影响。 +3. 积极参与:Contributor 需要积极参与到项目的讨论和决策中来,为项目的发展提供建设性的意见和建议。 +4. 团队协作:Contributor 需要具备良好的团队协作精神,能够与其他贡献者和 Maintainer 友好沟通,共同解决问题。 +5. 责任担当:Contributor 需要具备一定的责任心,愿意承担项目维护的部分工作,包括审查 PR、处理 Issue 等。 + +当 Contributor 满足以上条件时,现有的 Maintainer 将会对其进行评估,如果达到 Maintainer 的要求,将会邀请其成为新的 Maintainer。 + +## Maintainer 降级为 Contributor + +Maintainer 在项目中承担了重要的职责,我们希望每位 Maintainer 都能够保持对项目的关注和热情。 +然而,我们也理解每个人的时间和精力是有限的,因此,当 Maintainer 无法继续履行职责时,将会降级为 Contributor: + +1. 长时间不活跃:如果 Maintainer 在一段时间内(例如 3 个月)没有参与项目的维护工作,包括审查 PR、处理 Issue 等,将被视为不活跃。 +2. 质量问题:如果 Maintainer 在项目中的工作出现严重的质量问题,导致项目的发展受到影响,将被视为不符合 Maintainer 的要求。 +3. 团队协作问题:如果 Maintainer 在与其他贡献者和 Maintainer 的协作过程中出现严重的沟通问题或团队协作问题,如不尊重他人意见、频繁产生冲突、拒绝协作等,影响到项目的正常运作和氛围,将被视为不符合 Maintainer 的要求。 +4. 违反规定:如果 Maintainer 违反了项目的规定或行为准则,包括但不限于泄露敏感信息、滥用权限等,将被视为不符合 Maintainer 的要求。 +5. 主动申请:如果 Maintainer 由于个人原因无法继续履行职责,可以主动申请降级为 Contributor。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/_category_.json new file mode 100644 index 00000000..09eab23b --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Contribution Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/index.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/index.md new file mode 100644 index 00000000..57c4cca4 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/1-contribution-guide/index.md @@ -0,0 +1,117 @@ +# 贡献指南 + +贡献指南介绍了如何参与社区发展和向社区贡献。 + +为了帮助我们为所有人建立安全和积极的社区体验,我们要求所有的参与者遵守 CNCF 社区 [行为准则](https://github.com/cncf/foundation/blob/main/code-of-conduct-languages/zh.md)。 + +## 开始贡献之前 + +### 找到一个贡献点 + +有多种方式对 Karpor 贡献,包括代码和非代码贡献,我们对任何人对社区的任何方式的努力都非常感谢。 + +这里是一些示例: + +* 贡献代码仓库和文档。 +* 报告和分类 issue。 +* 在你的地区组织会议和用户群组。 +* 回答 Karpor 相关问题帮助别人。 + +并且: + +- 如果你不知道如何开始,我们准备了一份 [新手任务清单 | Community tasks 🎖︎](https://github.com/KusionStack/karpor/issues/463),或者你可以通过 issue 跟踪器过滤 [help wanted | 需要帮助](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) 或 [good first issue | 新手任务](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue++label%3A%22good+first+issue%22) 标签. 
你可以从任何感兴趣的 issue 开始。 +- 如果你有任何问题,欢迎 [提交 Issue](https://github.com/KusionStack/karpor/issues/new/choose) 或者 [发帖讨论](https://github.com/KusionStack/karpor/discussions/new/choose),我们会尽快回答。 + +### 如何进行非代码贡献 + +我们认为对社区存续和未来发展而言,非代码贡献和代码贡献同样重要。 + +- 参考 [非代码贡献指南](./non-code-contribute) 获取更多细节 + +### 如何进行代码贡献 + +不确定从哪里开始向 Karpor 代码库贡献?可以从浏览带有 `good first issue` 或 `help wanted` 标签的 issue 开始。 + +- [Good first issue | 新手任务](https://github.com/KusionStack/karpor/labels/good%20first%20issue) 通常很容易解决的任务。 +- [Help wantet | 需要帮助](https://github.com/KusionStack/karpor/labels/help%20wanted) 和复杂程度无关, 我们希望能够在社区解决的问题。 +- 参考 [代码贡献指南](./code-contribute) 获取更多细节。 + +学习 [代码规约](../conventions/code-conventions) 和 [测试规约](../conventions/test-conventions),并了解在写代码时要注意的地方。 + +然后阅读 [发布流程与节奏指南](../conventions/release-process),了解你的代码什么时候会发布。 + +## 贡献一个拉取请求(Pull Request) + +在打开或者认领 issue 之后,你可以通过提交一个拉取请求(Pull Request)为 karpor 进行代码或非代码贡献。这里是你应该遵循的一些步骤: + +### Fork 仓库 + +Karpor 遵循主干开发模式,也就是说,用于发布的代码维护在 main 分支。 + +那么,为了开发 Karpor,你需要从 [karpor](https://github.com/KusionStack/karpor) Fork 一个项目到你自己的工作空间,然后检出一个新的分支用于开发代码。 + +### 开发代码和非代码 + +现在你可以开始解决 issue 。为了维护 Karpor 的代码质量,提交 PR 之后,一些必要的检查会被触发。 + +开发结束之后,你需要 commit 代码然后将代码 push 到你 fork 出的仓库。由于 PR 的标题将作为 commit message,你的 PR 标题需要符合 [commit 规约](../2-conventions/4-commit-conventions.md)。 + +以下是一些简单的解释: + +PR 的标题需要按照以下结构组织: + +``` +<类型>[可选 范围]: <描述> + +[可选 正文] +``` + +要求中的类型可以帮助更好地确认这次提交的范围,基于 [Angular 指南](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines)。 + +我们使用小写的 `<类型>`,以避免在大小写敏感的问题上浪费时间。`<类型>` 可以是以下之一: + +``` +feat: 新特性 +fix: 漏洞修复 +docs: 仅文档改动 +build: 关于构建系统和外部依赖的改动 +style: 不影响代码含义的改动(如空行、格式、缺少分号等) +refactor: 不属于漏洞修复或者增加特性的代码改动 +perf: 提升性能的代码改动 +test: 增加缺少的测试用例或者修正现有的测试用例 +chore: 构建过程或辅助工具和库(如文档生成)的修改 +``` + +### 打开一个拉取请求(Pull Request) + +[打开一个拉取请求(Pull Request)](https://github.com/KusionStack/karpor/pulls):打开一个从你 fork 的仓库的开发分支到 karpor main 分支的拉取请求(Pull Request)。你需要清楚地描述你的 PR 做了什么,并且链接到一个 issue。除此之外,PR 的标题应该按照前面提到的 commit 规约,并且长度在 5-256 个字符之间,不允许使用 `WIP` 和 `[WIP]` 前缀。 + +### 签署贡献者许可协议(Contributor License Agreement,CLA) + +如果这是你的第一个 PR ,你需要签署我们的 [CLA(贡献者许可协议)](https://github.com/KusionStack/.github/blob/main/CLA.md)。 你唯一需要做的事情的是在当前 PR 按以下格式发表评论: + +`I have read the CLA Document and I hereby sign the CLA` + +如果你的 CLA 签署失败了,可能有以下原因: + +* 评论的格式必须与上面完全一致,例如不能有额外的空格、空行等。 +* git commit 的作者和 Karpor PR 的作者必须一致。 + +### PR 检查 + +为了维持 karpor 项目的可靠性,以下检查将会自动触发: + +* 单元测试 +* Golang 代码风格检查 +* Commit 风格检查 +* PR 标题检查 +* 代码许可证检查 +* Markdown 格式检查 + +请确保你的 PR 通过这些检查。 + +## 成为社区成员 + +如果你对成为社区成员感兴趣或者想了解更多关于治理的内容,请查看 [角色](./3-roles.md) 获取更多细节。 + +在 Karpor 的世界中享受编码和协作吧! 
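
作为上述贡献流程的一个简单串联,下面给出一组示意命令;其中的分支名、提交类型、范围和描述都只是假设的示例,请根据你的实际改动替换:

```shell
# 示意:基于 fork 出的仓库创建开发分支(分支名为假设的示例)
git checkout -b feat/improve-search-docs

# 完成修改后暂存改动
git add .

# 提交信息遵循上文的约定式提交格式:<类型>[可选 范围]: <描述>
git commit -m "feat(search): add sql search examples"

# 推送到自己 fork 的仓库,然后在 GitHub 上向 karpor 的 main 分支发起 Pull Request
git push origin feat/improve-search-docs
```
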
diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/1-release-process.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/1-release-process.md new file mode 100644 index 00000000..0454cc00 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/1-release-process.md @@ -0,0 +1,49 @@ +--- +title: 发布流程和节奏 +--- +## 发布计划 + +我们将通过 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones) 建立并持续根据发布计划。每个发布里程碑将包括两类任务: + +- Maintainer 承诺完成的任务。Maintainer 会在根据他们的时间和精力承诺下次发布要实现的特性。通常,这些任务会经过离线讨论并添加到里程碑。这些任务会被分配给计划实施和测试它们的 Maintainer。 +- Contributor 提出的额外事项,通常是不紧急的特性和优化。Maintainer 不承诺在当前 release 周期内完成,但承诺会对这些社区提交进行代码审查。 + +里程碑会清晰地描述最终要的特性和期望完成日期。这将清楚地告知终端用户下一版本的发布时间和内容。 + +除了下一次里程碑之外,我们也会维护未来几个发布里程碑的草稿。 + +## 发布标准 + +- 所有的 **官方发布** 都应该在 `main` 分支添加标签,并且携带类似 `alpha`、 `beta`、 `rc` 的可选先行版本后缀,例如,一个通常的官方发布版本可能是 `v1.2.3`、 `v1.2.3-alpha.0`。例如,如果我们想要在发布正式版本 `v1.2.3` 之前进行一些验证,我们可以先发布类似 `v1.2.3-alpha.0` 的先行版本,在验证完成之后再发布 `v1.2.3` 版本。 +- Maintainer 承诺完成特定的特性和增强,由 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones) 跟踪。 +- 我们会尽可能防止发布延期;如果一个特性无法按时完成,它将会被挪到下次发布。 +- **每月** 发布一个新版本。 + +## 发布标准流程 + +Maintainer 负责推动发布过程并遵循标准操作程序以确保发布的质量。 + +1. 为指定发布的 git commit 添加标签并推到上游;该标签需要满足[语义化版本控制](#%E8%AF%AD%E4%B9%89%E5%8C%96%E7%89%88%E6%9C%AC%E6%8E%A7%E5%88%B6)。 +2. 确保触发的 Github Action 流水线执行成功。一旦成功,将会自动触发一次 Github 发布,其中包括根据提交信息计算出的 Changelog,以及镜像和 tar.gz 文件等制品。 +3. 根据 **Github 发布** 编写清晰的发布说明,包括: + - 用户友好的发布亮点。 + - 已弃用和不兼容的更改。 + - 有关如何安装和升级的简要说明。 + +## 门控测试 + +在创建发布分支之前:我们会有一个 **一周** 的代码冻结期。在这期间,我们将避免合并任何功能 PR,只会修复错误。 + +Maintainer 会负责测试并修复那些在临近发布时间发现的紧急问题。 + +## 语义化版本控制 + +`Karpor` 采用 [语义化版本控制](https://semver.org/lang/zh-CN/) 作为版本号。 + +版本格式为:主版本号.次版本号.修订号,例如, `v1.2.3`。版本号 **递增规则** 如下: + +- 主版本号:当你做了不兼容的 API 修改。 +- 次版本号:当你做了向下兼容的功能性新增。 +- 修订号:当你做了向下兼容的问题修正。 + +**先行版本号及版本编译信息** 可以作为加到“主版本号.次版本号.修订号”的后面,作为延伸,比如 `v1.2.3-alpha.0`, `v1.2.3-beta.1`, `v1.2.3-rc.2`, 其中 `-alpha.0`, `-beta.1`, `-rc.2` 是先行版本号。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/2-code-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/2-code-conventions.md new file mode 100644 index 00000000..e30bab57 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/2-code-conventions.md @@ -0,0 +1,79 @@ +--- +title: 代码规约 +--- +在这部分,你将会了解 Karpor 项目中所有类型的代码规约。不必一次把这些规则全部了解,确保你在编写代码前阅读对应的部分就可以了。 + +- [Go 代码规约](#go-%E4%BB%A3%E7%A0%81%E8%A7%84%E7%BA%A6) +- [Bash 或脚本规约](#bash-%E6%88%96%E8%84%9A%E6%9C%AC%E8%A7%84%E7%BA%A6) +- [目录和文件规约](#%E7%9B%AE%E5%BD%95%E5%92%8C%E6%96%87%E4%BB%B6%E8%A7%84%E7%BA%A6) +- [Linting 和格式化](#linting-%E5%92%8C%E6%A0%BC%E5%BC%8F%E5%8C%96) + +## Go 代码规约 + +- [Go 代码评审评论](https://go.dev/wiki/CodeReviewComments) +- [高效的 Go](https://golang.org/doc/effective_go.html) +- 了解并且避免 [Go 地雷](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) +- 为你的代码编写注释. 
+ + - [Go's 注释规约](https://go.dev/blog/godoc) + - 如果代码评阅者询问你代码为什么要这么实现,这可能说明你应当为你的代码编写注释。 +- 命令行标志应该用双连接号 `--`,而不是下划线 +- 接口 + + - 根据 RFC3986,URL 是大小写敏感的。Karpor 使用“短横线命名(`kebab-case`)”作为 URL。 + - 例如:`POST /rest-api/v1/resource-group-rule` +- 命名 + + - 为接口选择名称时请考虑包名称,避免冗余。 + + - 例如: `storage.Interface` 优于 `storage.StorageInterface`。 + - 不要在包名称中使用大写字符、下划线和破折号。 + - 选择包名称时,请考虑父目录名称。 + + - 所有 `pkg/manager/cluster/foo.go` 应该命名为 `package cluster` + 而不是 `package clustermanager`。 + - 除非有充分理由,`package foo` 行应该与 .go 文件所在目录的名称相同。 + - 为了避免歧义,导入包时可以指定别名。 + - 锁应该被称为 `lock`,并且永远不应该被嵌入(总是以 `lock sync.Mutex` 的形式明确声明)。当存在多个锁时,应该遵循 Go 的命名约定给每个锁一个 buts 的名称 - `stateLock`,`mapLock` 等。 + +## Bash 或脚本规约 + +- [Shell 样式指南](https://google.github.io/styleguide/shell.xml) +- 确保构建、发布、测试和集群管理脚本可以在 macOS 上运行 + +## 目录和文件规约 + +- 避免软件包无序扩展。为新的包找到合适的子目录。 + + - 没有更合适放置位置的新包应该放在 `pkg/util` 下的子目录。 +- 避免使用通用包。使用名为 `util` 的包让人疑惑。相反地,应当根据你期望的功能推导出包名 + 例如,处理等待操作的使用功能位于 `wait` 包中,包括类似 Poll 这样的功能,所以完整名称是 `wait.Poll` +- 所有的文件名都应该是小写 +- Go 源码文件名和目录名中使用 `_`,而不是 `-` + + - 包目录名通常应当尽量避免使用分隔符(当包目录名含多个单词时,它们通常应该被放在嵌套的子目录) +- 文档的文件名和目录名中应该使用 `-`,而不是 `_` +- 用于说明系统特性的示例应该位于 `/docs/user-guide` 或 `/docs/admin`, 取决于它是主要面向部署应用的用户还是集群管理员。实际的应用示例应位于 `/example` 中 + + - 示例还应该展示 [配置和使用系统的最佳实践](https://kubernetes.io/docs/concepts/configuration/overview/) +- 第三方代码 + + - 普通的第三方依赖 Go 代码 由 [go modules](https://github.com/golang/go/wiki/Modules) 管理 + - 其他的第三方代码应该放在 `/third_party` 目录下 + + - fork 的第三方 Go 代码放在 `/third_party/forked` 目录下 + - fork 的_golang stdlib_ 代码放在 `/third_party/forked/golang` 目录下 + - 第三方代码必须包含许可证 + - 这也包括修改过的第三方代码和引用 + +## Linting 和格式化 + +为了确保 Go 代码库之间的一致性,我们要求所有代码通过多个 linter 检查。 + +要运行所有的 linter,请使用 `lint` 作为 Makefile 目标: + +```shell +make lint +``` + +该命令将清理代码并进行一些 lint 检查。检查结束后请记得检查所有变更。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/3-test-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/3-test-conventions.md new file mode 100644 index 00000000..892c7f9e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/3-test-conventions.md @@ -0,0 +1,266 @@ +--- +title: 测试规约 +--- +## 测试原则 + +在 Karpor 中,我们主要关注以下三种测试: + +- 单元测试:主要关注 **最小可测试单元**(例如函数,方法,实用类等) +- 集成测试:针对 **多个单元(或模块)**间相互作用和集成的测试 +- 端到端测试(e2e tests): 针对 **整个系统行为** 的测试,通常需要模拟真实用户场景。 + +每种测试都有优势,劣势和适用场景。为了实现更好的开发体验,我们在编写测试时应遵循以下原则。 + +**测试原则**: + +- 单个测试用例应该仅覆盖单个场景 +- 遵循 **7-2-1 原则**,即 70% 的单元测试,20% 的集成测试和 10% 的端到端测试 +- **非必要情况下,避免在单元测试中使用框架**(比如 `golang/mock`)。如果你觉得需要在单元测试中使用 mock 框架,那么你可能应该实现的是集成测试甚至端到端测试。 + +## 技术选择 + +在当前时间点,在 Go 语言生态中最流行的测试框架主要有 [Ginkgo](https://onsi.github.io/ginkgo/)/[Gomega](https://onsi.github.io/gomega/) 和 [Testify](https://github.com/stretchr/testify)。因此,本节主要讨论这两个测试框架的的特点、优缺点以及最终的选择。 + +### Ginkgo/Gomega + +**优点**: + +1. **BDD 支持**:Ginkgo 因为支持行为驱动开发(Behavior-Driven Development,BDD)风格而收到许多开发人员的青睐。它提供了丰富的 DSL 语法,通过 `Describe`、`Context`、`It` 等关键字使测试用例更具描述性和可读性。 +2. **并行执行**:Ginkgo 能够以多进程并行执行测试,提高了测试执行的效率。 +3. **丰富的匹配器**:与 Gomega 匹配器库结合使用时,它提供了丰富的断言能力,使测试更加直观和方便。 +4. **异步支持**:Ginkgo 提供了对处理复杂异步场景的原生支持,降低了死锁和超时的风险。 +5. **测试用例组织**:支持将测试用例组织到测试套件中,便于管理和扩展。 + +**缺点**: + +1. **学习曲线过于陡峭**:对不熟悉 BDD 测试框架的开发者来说,Ginkgo 的 DSL 语法可能需要一些时间熟悉。 +2. **并行测试的复杂性**:尽管 Ginkgo 支持并行测试,但是管理用于并行测试的资源和环境可能会引入额外的复杂性。 + +### Testify + +**优点**: + +1. **简化的接口 API**: Testify 提供了简单明了的 API,容易上手,特别是对于熟悉了 `testing` 包的开发者。 +2. **Mock 支持**: 提供了强大的 Mock 功能,便于模拟依赖和接口进行单元测试。 +3. **表格驱动测试**: 支持表格驱动测试,允许测试同一个函数使用各种不同输入和预期输出,增强了测试用例的可重用性。 +4. 
**与 `testing` 包的兼容性**: Testify 与 Go 标准库的 testing 包高度兼容,可以无缝集成到现有的测试工作流中。 +5. **文档**: Testify 的 [官方文档](https://pkg.go.dev/github.com/stretchr/testify) 也提供了丰富的介绍,如何使用其断言和 Mock 功能。 + +**缺点**: + +1. **缺少 BDD 支持**: Testify 不支持 BDD 风格,对于寻求提高测试用例可读性的开发人员可能直观性较差。 +2. **功能相对简单**: 与 Ginkgo 相比,Testify 的功能相对简单,可能不满足一些复杂测试场景的需求。 + +### 总结 + +简而言之,Ginkgo/Gomega 提供了更好的可读性和可维护性,产生清晰明了的测试代码,但需要熟悉 BDD 风格,学习曲线比较陡峭。Testify 更简单、更实用,学习曲线较为平缓,但随着时间的推移,测试代码风格可能变得更加多样化,降低可维护性。 + +考虑到 Karpor 的实际情况和两种框架的优缺点,我们决定结合使用这两个框架: + +- 使用 Testify 进行单元测试,坚持使用 [表格驱动测试](https://go.dev/wiki/TableDrivenTests) 来约束代码风格,防止退化; +- 利用 Ginkgo 的 BDD 特性编写更高级别的集成和端到端测试; + +这种组合充分发挥了两种框架的优势,提高了测试的整体效率、可读性和质量。 + +## 编写规范 + +### 测试风格 + +[表格驱动测试](https://go.dev/wiki/TableDrivenTests) 是编写测试用例的最佳实践,类似于编程中的设计模式,它也是官方 Go 语言推荐的风格。表格驱动测试使用表格提供各种输入和预期输出,允许同一个测试函数验证不同的场景。这种方法的优点是它增加了测试用例的可重用性,减少了重复代码,并使测试更加清晰易于维护。 + +虽然 Go 的 `testing` 包中没有直接支持表格驱动测试的语法,但可以通过编写辅助函数和使用匿名函数来模拟实现。 + +这是一个在 Go 标准库的 `fmt` 包中实现的表格驱动测试的示例: + +```go +var flagtests = []struct { + in string + out string +}{ + {"%a", "[%a]"}, + {"%-a", "[%-a]"}, + {"%+a", "[%+a]"}, + {"%#a", "[%#a]"}, + {"% a", "[% a]"}, + {"%0a", "[%0a]"}, + {"%1.2a", "[%1.2a]"}, + {"%-1.2a", "[%-1.2a]"}, + {"%+1.2a", "[%+1.2a]"}, + {"%-+1.2a", "[%+-1.2a]"}, + {"%-+1.2abc", "[%+-1.2a]bc"}, + {"%-1.2abc", "[%-1.2a]bc"}, +} +func TestFlagParser(t *testing.T) { + var flagprinter flagPrinter + for _, tt := range flagtests { + t.Run(tt.in, func(t *testing.T) { + s := Sprintf(tt.in, &flagprinter) + if s != tt.out { + t.Errorf("got %q, want %q", s, tt.out) + } + }) + } +} +``` + +值得注意的是,目前大部分主流 IDE 都已经集成了 [gotests](https://github.com/cweill/gotests),可以自动生成表格驱动风格的 Go 单元测试,相信这可以提升大家编写单元测试的效率: + +- [GoLand](https://blog.jetbrains.com/go/2020/03/13/test-driven-development-with-goland/) +- [Visual Studio Code](https://juandes.com/go-test-vsc/) + +### 文件命名 + +- **规范内容**:测试函数必须以 `Test` 开头,后面跟着被测试函数的名称,使用驼峰式命名法。 +- **正面示例**:`xxx_test.go` +- **反面示例**:`testFile.go`、`test_xxx.go` + +### 测试函数命名 + +- **规范内容**:测试函数名必须以 `Test` 开头,后面跟着被测试函数的名称,使用驼峰式命名法。 +- **正面示例**: + ```go + func TestAdd(t *testing.T) { + // 测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestAddWrong(t *testing.T) { + // 测试逻辑 ... + } + ``` + +### 测试函数签名 + +- **规范内容**:测试函数签名必须是 `func TestXxx(t *testing.T)` 形式,其中 `t` 是类型为 `*testing.T` 的对象,并且不应该有其他的参数和返回值。 +- **正面示例**: + ```go + func TestSubtraction(t *testing.T) { + // 测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestSubtraction(value int) { + // 测试逻辑 ... + } + ``` + +### 测试组织 + +- **规范内容**:测试用例应当相互独立,避免测试之间相互影响;使用子测试 `t.Run` 来组织复杂的测试场景。 +- **正面示例**: + ```go + func TestMathOperations(t *testing.T) { + t.Run("Addition", func(t *testing.T) { + // addition 的测试逻辑 ... + }) + t.Run("Subtraction", func(t *testing.T) { + // subtraction 的测试逻辑 ... + }) + } + ``` +- **反面示例**: + ```go + func TestMathOperations(t *testing.T) { + // 混合 addition 和 subtraction 的测试逻辑 ... 
+
+### 总结
+
+简而言之,Ginkgo/Gomega 提供了更好的可读性和可维护性,产生清晰明了的测试代码,但需要熟悉 BDD 风格,学习曲线比较陡峭。Testify 更简单、更实用,学习曲线较为平缓,但随着时间的推移,测试代码风格可能变得更加多样化,降低可维护性。
+
+考虑到 Karpor 的实际情况和两种框架的优缺点,我们决定结合使用这两个框架:
+
+- 使用 Testify 进行单元测试,坚持使用 [表格驱动测试](https://go.dev/wiki/TableDrivenTests) 来约束代码风格,防止退化;
+- 利用 Ginkgo 的 BDD 特性编写更高级别的集成和端到端测试。
+
+这种组合充分发挥了两种框架的优势,提高了测试的整体效率、可读性和质量。
+
+## 编写规范
+
+### 测试风格
+
+[表格驱动测试](https://go.dev/wiki/TableDrivenTests) 是编写测试用例的最佳实践,类似于编程中的设计模式,它也是 Go 官方推荐的风格。表格驱动测试使用表格提供各种输入和预期输出,允许同一个测试函数验证不同的场景。这种方法的优点是它增加了测试用例的可重用性,减少了重复代码,并使测试更加清晰、易于维护。
+
+虽然 Go 的 `testing` 包中没有直接支持表格驱动测试的语法,但可以通过编写辅助函数和使用匿名函数来模拟实现。
+
+这是一个在 Go 标准库的 `fmt` 包中实现的表格驱动测试的示例:
+
+```go
+var flagtests = []struct {
+	in  string
+	out string
+}{
+	{"%a", "[%a]"},
+	{"%-a", "[%-a]"},
+	{"%+a", "[%+a]"},
+	{"%#a", "[%#a]"},
+	{"% a", "[% a]"},
+	{"%0a", "[%0a]"},
+	{"%1.2a", "[%1.2a]"},
+	{"%-1.2a", "[%-1.2a]"},
+	{"%+1.2a", "[%+1.2a]"},
+	{"%-+1.2a", "[%+-1.2a]"},
+	{"%-+1.2abc", "[%+-1.2a]bc"},
+	{"%-1.2abc", "[%-1.2a]bc"},
+}
+
+func TestFlagParser(t *testing.T) {
+	var flagprinter flagPrinter
+	for _, tt := range flagtests {
+		t.Run(tt.in, func(t *testing.T) {
+			s := Sprintf(tt.in, &flagprinter)
+			if s != tt.out {
+				t.Errorf("got %q, want %q", s, tt.out)
+			}
+		})
+	}
+}
+```
+
+值得注意的是,目前大部分主流 IDE 都已经集成了 [gotests](https://github.com/cweill/gotests),可以自动生成表格驱动风格的 Go 单元测试,相信这可以提升大家编写单元测试的效率:
+
+- [GoLand](https://blog.jetbrains.com/go/2020/03/13/test-driven-development-with-goland/)
+- [Visual Studio Code](https://juandes.com/go-test-vsc/)
+
+### 文件命名
+
+- **规范内容**:测试文件的文件名必须以 `_test.go` 结尾,并与被测试的源文件相对应。
+- **正面示例**:`xxx_test.go`
+- **反面示例**:`testFile.go`、`test_xxx.go`
+
+### 测试函数命名
+
+- **规范内容**:测试函数名必须以 `Test` 开头,后面跟着被测试函数的名称,使用驼峰式命名法。
+- **正面示例**:
+  ```go
+  func TestAdd(t *testing.T) {
+      // 测试逻辑 ...
+  }
+  ```
+- **反面示例**:
+  ```go
+  func addTest(t *testing.T) {
+      // 测试逻辑 ...
+  }
+  ```
+
+### 测试函数签名
+
+- **规范内容**:测试函数签名必须是 `func TestXxx(t *testing.T)` 形式,其中 `t` 是类型为 `*testing.T` 的对象,并且不应该有其他的参数和返回值。
+- **正面示例**:
+  ```go
+  func TestSubtraction(t *testing.T) {
+      // 测试逻辑 ...
+  }
+  ```
+- **反面示例**:
+  ```go
+  func TestSubtraction(value int) {
+      // 测试逻辑 ...
+  }
+  ```
+
+### 测试组织
+
+- **规范内容**:测试用例应当相互独立,避免测试之间相互影响;使用子测试 `t.Run` 来组织复杂的测试场景。
+- **正面示例**:
+  ```go
+  func TestMathOperations(t *testing.T) {
+      t.Run("Addition", func(t *testing.T) {
+          // addition 的测试逻辑 ...
+      })
+      t.Run("Subtraction", func(t *testing.T) {
+          // subtraction 的测试逻辑 ...
+      })
+  }
+  ```
+- **反面示例**:
+  ```go
+  func TestMathOperations(t *testing.T) {
+      // 混合 addition 和 subtraction 的测试逻辑 ...
+  }
+  ```
+
+### 测试覆盖率
+
+- **规范内容**:需要注意测试覆盖率,使用 `go test -cover` 命令检查代码的测试覆盖率。
+- **正面示例**:
+
+  ```shell
+  $ go test -cover
+  ```
+- **反面示例**:
+
+  ```shell
+  $ go test # 不检查测试覆盖率
+  ```
+- **注意**:Karpor 将此命令包装为 `make cover`,它将在命令行中输出每个包的覆盖率和总覆盖率。如果你想在浏览器中查看覆盖率报告,请执行 `make cover-html`。
+
+### 性能测试
+
+- **规范内容**:性能测试函数应当以 `Benchmark` 开头,并且接受 `*testing.B` 类型的参数,专注于性能测试。
+- **正面示例**:
+  ```go
+  func BenchmarkAdd(b *testing.B) {
+      for i := 0; i < b.N; i++ {
+          add(1, 1)
+      }
+  }
+  ```
+- **反面示例**:
+  ```go
+  func BenchmarkAddWrong(b *testing.B) {
+      for i := 0; i < 1000; i++ {
+          add(1, 1)
+      }
+  }
+  ```
+
+### 并发测试
+
+- **规范内容**:对于并发代码,应该编写适当的测试用例,以确保并发逻辑的正确性。
+- **正面示例**:
+  ```go
+  func TestConcurrentAccess(t *testing.T) {
+      // 设置并发环境 ...
+      // 并发访问测试逻辑 ...
+  }
+  ```
+- **反面示例**:
+  ```go
+  func TestConcurrentAccess(t *testing.T) {
+      // 仅测试单线程逻辑 ...
+  }
+  ```
+
+### 测试帮助函数
+
+- **规范内容**:可以在测试文件中定义辅助函数来帮助设置测试环境或清理资源。
+- **正面示例**:
+  ```go
+  func setupTest(t *testing.T) {
+      // 设置测试环境 ...
+  }
+
+  func tearDownTest(t *testing.T) {
+      // 清理资源 ...
+  }
+
+  func TestMyFunction(t *testing.T) {
+      t.Run("TestSetup", func(t *testing.T) {
+          setupTest(t)
+          defer tearDownTest(t)
+          // 测试逻辑 ...
+      })
+  }
+  ```
+- **反面示例**:
+  ```go
+  // 直接在测试中设置环境和清理资源
+  func TestMyFunction(t *testing.T) {
+      // 设置测试环境 ...
+      // 测试逻辑 ...
+      // 清理资源 ...
+  }
+  ```
+
+### 避免使用全局变量
+
+- **规范内容**:尽量避免在测试中使用全局变量以确保测试独立。
+- **正面示例**:在测试函数内部声明并使用必要的变量。
+- **反面示例**:在测试文件的顶部声明全局变量。
+
+### 清晰的错误信息
+
+- **规范内容**:当测试失败时,输出清晰易懂的错误信息,帮助开发人员定位问题。
+- **正面示例**:
+  - `t.Errorf("Expected value %d, but got %d", expected, actual)`
+- **反面示例**:
+  - `t.Errorf("Error occurred")`
+  - `fmt.Println("Error occurred")`
+  - `panic("Error occurred")`
diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/4-commit-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/4-commit-conventions.md
new file mode 100644
index 00000000..979b7631
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/4-commit-conventions.md
@@ -0,0 +1,71 @@
+---
+title: Commit 规约
+---
+## Commit 信息结构
+
+Karpor 遵循 [约定式提交](https://www.conventionalcommits.org/zh-hans/v1.0.0/)。
+
+Commit 信息应当组织为以下结构:
+
+```
+<类型>[可选 范围]: <描述>
+
+[可选 正文]
+```
+
+## 示例
+
+带范围的 Commit 信息:
+
+```
+feat(lang): add polish language
+```
+
+不带正文的 Commit 信息:
+
+```
+docs: correct spelling of CHANGELOG
+```
+
+带多个正文段落的 Commit 信息:
+
+```
+fix: correct minor typos in code
+
+see the issue for details
+
+on typos fixed.
+ +reviewed-by: Z +refs #133 +``` + +## `<类型>`(必须) + +必须填写的类型有助于更容易确定这次提交的范围,基于 [Angular 指南](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines)。 + +我们在 `<类型>` 使用小写,以避免花费时间在大小写敏感问题上。`<类型>` 可以是以下之一: + +- **feat**:新特性 +- **fix**:漏洞修复 +- **docs**:仅文档改动 +- **build**:关于构建系统和外部依赖的改动 +- **style**:不影响代码含义的改动(如空行、格式、缺少分号等) +- **refactor**:不属于漏洞修复或者增加特性的代码改动 +- **perf**:提升性能的代码改动 +- **test**: 增加缺少的测试用例或者修正现有的测试用例 +- **chore**: 构建过程或辅助工具和库(如文档生成)的修改 + +## `<范围>`(可选) + +范围是可选的,可以包含在括号中为类型提供更多的上下文信息。可以指定这次 commit 的任何内容。Github issue 也是有效的范围,例如 `fix(ui)`、`feat(api)`、`fix(#233)` 等。 + +当改动影响多个范围时,可以使用 `*`。 + +## `<描述>`(必须) + +描述必须紧跟在类型/范围前缀后面的冒号和空格。它是代码更改的简明摘要,例如 `fix: array parsing issue when multiple spaces were contained in string`,而不是 `fix: bug`。 + +## `<正文>`(可选) + +在简短的描述后可以添加较长的正文,提供有关代码更改的更多上下文信息。正文必须位于描述之后一行。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/_category_.json new file mode 100644 index 00000000..3287fa06 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/2-conventions/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Conventions" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/_category_.json new file mode 100644 index 00000000..8de262b6 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/4-developer-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/1-karpor.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/1-karpor.md new file mode 100644 index 00000000..891809d7 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/1-karpor.md @@ -0,0 +1,230 @@ +--- +title: karpor +--- +### Synopsis + +Launch an API server + +``` +karpor [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --api-audiences strings Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. + --audit-log-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-log-batch-max-size int The maximum size of a batch. Only used in batch mode. 
(default 1) + --audit-log-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. + --audit-log-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. + --audit-log-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. + --audit-log-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. + --audit-log-compress If set, the rotated log files will be compressed using gzip. + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "blocking") + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-log-truncate-enabled Whether event and batch truncating is enabled. + --audit-log-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-log-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-log-version string API group and version used for serializing audit events written to log. (default "audit.k8s.io/v1") + --audit-policy-file string Path to the file that defines the audit policy configuration. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-webhook-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. (default true) + --audit-webhook-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. + --audit-webhook-initial-backoff duration The amount of time to wait before retrying the first failed request. 
(default 10s) + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "batch") + --audit-webhook-truncate-enabled Whether event and batch truncating is enabled. + --audit-webhook-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-webhook-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-webhook-version string API group and version used for serializing audit events written to webhook. (default "audit.k8s.io/v1") + --authorization-mode strings Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default [RBAC]) + --authorization-policy-file string File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --authorization-webhook-version string The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook. (default "v1beta1") + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. (default [.*]) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). 
Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. (default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook,ValidatingAdmissionPolicy]) + --egress-selector-config-file string File with apiserver egress selector configuration. + --elastic-search-addresses strings The elastic search address + --elastic-search-password string The elastic search password + --elastic-search-username string The elastic search username + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-priority-and-fairness If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness (default true) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --encryption-provider-config-automatic-reload Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints. + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-count-metric-poll-period duration Frequency of polling etcd for number of resources per type. 0 disables the metric collection. (default 1m0s) + --etcd-db-metric-poll-interval duration The interval of requests to poll etcd and update metric. 0 disables the metric collection (default 30s) + --etcd-healthcheck-timeout duration The timeout to use when checking etcd health. (default 2s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry/karpor") + --etcd-readycheck-timeout duration The timeout to use when checking etcd readiness (default 2s) + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary. + --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. 
Options are: + APIListChunking=true|false (BETA - default=true) + APIPriorityAndFairness=true|false (BETA - default=true) + APIResponseCompression=true|false (BETA - default=true) + APISelfSubjectReview=true|false (ALPHA - default=false) + APIServerIdentity=true|false (BETA - default=true) + APIServerTracing=true|false (ALPHA - default=false) + AggregatedDiscoveryEndpoint=true|false (ALPHA - default=false) + AllAlpha=true|false (ALPHA - default=false) + AllBeta=true|false (BETA - default=false) + AnyVolumeDataSource=true|false (BETA - default=true) + AppArmor=true|false (BETA - default=true) + CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + CPUManagerPolicyBetaOptions=true|false (BETA - default=true) + CPUManagerPolicyOptions=true|false (BETA - default=true) + CSIMigrationPortworx=true|false (BETA - default=false) + CSIMigrationRBD=true|false (ALPHA - default=false) + CSINodeExpandSecret=true|false (ALPHA - default=false) + CSIVolumeHealth=true|false (ALPHA - default=false) + ComponentSLIs=true|false (ALPHA - default=false) + ContainerCheckpoint=true|false (ALPHA - default=false) + CronJobTimeZone=true|false (BETA - default=true) + CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false) + CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false) + CustomResourceValidationExpressions=true|false (BETA - default=true) + DisableCloudProviders=true|false (ALPHA - default=false) + DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false) + DownwardAPIHugePages=true|false (BETA - default=true) + DynamicResourceAllocation=true|false (ALPHA - default=false) + EventedPLEG=true|false (ALPHA - default=false) + ExpandedDNSConfig=true|false (BETA - default=true) + ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) + GRPCContainerProbe=true|false (BETA - default=true) + GracefulNodeShutdown=true|false (BETA - default=true) + GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true) + HPAContainerMetrics=true|false (ALPHA - default=false) + HPAScaleToZero=true|false (ALPHA - default=false) + HonorPVReclaimPolicy=true|false (ALPHA - default=false) + IPTablesOwnershipCleanup=true|false (ALPHA - default=false) + InTreePluginAWSUnregister=true|false (ALPHA - default=false) + InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false) + InTreePluginAzureFileUnregister=true|false (ALPHA - default=false) + InTreePluginGCEUnregister=true|false (ALPHA - default=false) + InTreePluginOpenStackUnregister=true|false (ALPHA - default=false) + InTreePluginPortworxUnregister=true|false (ALPHA - default=false) + InTreePluginRBDUnregister=true|false (ALPHA - default=false) + InTreePluginvSphereUnregister=true|false (ALPHA - default=false) + JobMutableNodeSchedulingDirectives=true|false (BETA - default=true) + JobPodFailurePolicy=true|false (BETA - default=true) + JobReadyPods=true|false (BETA - default=true) + KMSv2=true|false (ALPHA - default=false) + KubeletInUserNamespace=true|false (ALPHA - default=false) + KubeletPodResources=true|false (BETA - default=true) + KubeletPodResourcesGetAllocatable=true|false (BETA - default=true) + KubeletTracing=true|false (ALPHA - default=false) + LegacyServiceAccountTokenTracking=true|false (ALPHA - default=false) + LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) + LogarithmicScaleDown=true|false (BETA - default=true) + MatchLabelKeysInPodTopologySpread=true|false (ALPHA - default=false) + MaxUnavailableStatefulSet=true|false (ALPHA - default=false) + 
MemoryManager=true|false (BETA - default=true) + MemoryQoS=true|false (ALPHA - default=false) + MinDomainsInPodTopologySpread=true|false (BETA - default=false) + MinimizeIPTablesRestore=true|false (ALPHA - default=false) + MultiCIDRRangeAllocator=true|false (ALPHA - default=false) + NetworkPolicyStatus=true|false (ALPHA - default=false) + NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true) + NodeOutOfServiceVolumeDetach=true|false (BETA - default=true) + NodeSwap=true|false (ALPHA - default=false) + OpenAPIEnums=true|false (BETA - default=true) + OpenAPIV3=true|false (BETA - default=true) + PDBUnhealthyPodEvictionPolicy=true|false (ALPHA - default=false) + PodAndContainerStatsFromCRI=true|false (ALPHA - default=false) + PodDeletionCost=true|false (BETA - default=true) + PodDisruptionConditions=true|false (BETA - default=true) + PodHasNetworkCondition=true|false (ALPHA - default=false) + PodSchedulingReadiness=true|false (ALPHA - default=false) + ProbeTerminationGracePeriod=true|false (BETA - default=true) + ProcMountType=true|false (ALPHA - default=false) + ProxyTerminatingEndpoints=true|false (BETA - default=true) + QOSReserved=true|false (ALPHA - default=false) + ReadWriteOncePod=true|false (ALPHA - default=false) + RecoverVolumeExpansionFailure=true|false (ALPHA - default=false) + RemainingItemCount=true|false (BETA - default=true) + RetroactiveDefaultStorageClass=true|false (BETA - default=true) + RotateKubeletServerCertificate=true|false (BETA - default=true) + SELinuxMountReadWriteOncePod=true|false (ALPHA - default=false) + SeccompDefault=true|false (BETA - default=true) + ServerSideFieldValidation=true|false (BETA - default=true) + SizeMemoryBackedVolumes=true|false (BETA - default=true) + StatefulSetAutoDeletePVC=true|false (ALPHA - default=false) + StatefulSetStartOrdinal=true|false (ALPHA - default=false) + StorageVersionAPI=true|false (ALPHA - default=false) + StorageVersionHash=true|false (BETA - default=true) + TopologyAwareHints=true|false (BETA - default=true) + TopologyManager=true|false (BETA - default=true) + TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + TopologyManagerPolicyBetaOptions=true|false (BETA - default=false) + TopologyManagerPolicyOptions=true|false (ALPHA - default=false) + UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false) + ValidatingAdmissionPolicy=true|false (ALPHA - default=false) + VolumeCapacityPriority=true|false (ALPHA - default=false) + WinDSR=true|false (ALPHA - default=false) + WinOverlay=true|false (BETA - default=true) + WindowsHostNetwork=true|false (ALPHA - default=true) (default APIPriorityAndFairness=true) + --goaway-chance float To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. + -h, --help help for karpor + --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default. (default 1000) + --lease-reuse-duration-seconds int The time in seconds that each lease is reused. 
A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer. (default 60) + --livez-grace-period duration This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true. + --max-mutating-requests-inflight int This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely. (default 200) + --max-requests-inflight int This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false] + --permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false] + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --read-only-mode turn on the read only mode + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests. + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. + --search-storage-type string The search storage type + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 443) + --shutdown-delay-duration duration Time to delay the termination. 
During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server. + --shutdown-send-retry-after If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle. + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default). + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. Supported media types: [application/json, application/yaml, application/vnd.kubernetes.protobuf] (default "application/json") + --strict-transport-security-directives strings List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload' + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. (default "apiserver.local.config/certificates/apiserver.crt") + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. + Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. + Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. + --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13 + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. (default "apiserver.local.config/certificates/apiserver.key") + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. 
If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --tracing-config-file string File with apiserver tracing configuration. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. This option is only meaningful for resources built into the apiserver, not ones defined by CRDs or aggregated from external servers, and is only consulted if the watch-cache is enabled. The only meaningful size setting to supply here is zero, which means to disable watch caching for the associated resource; all non-zero values are equivalent and mean to not disable watch caching for that resource +``` + +### SEE ALSO + +* [karpor syncer](2-karpor-syncer.md) - start a resource syncer to sync resource from clusters + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/2-karpor-syncer.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/2-karpor-syncer.md new file mode 100644 index 00000000..d25245ae --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/2-karpor-syncer.md @@ -0,0 +1,25 @@ +--- +title: karpor syncer +--- +## karpor syncer + +start a resource syncer to sync resource from clusters + +``` +karpor syncer [flags] +``` + +### Options + +``` + --elastic-search-addresses strings The elastic search address. + --health-probe-bind-address string The address the probe endpoint binds to. (default ":8081") + -h, --help help for syncer + --metrics-bind-address string The address the metric endpoint binds to. 
(default ":8080") +``` + +### SEE ALSO + +* [karpor](1-karpor.md) - Launch an API server + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/_category_.json new file mode 100644 index 00000000..41757f5f --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/1-cli-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "CLI Commands" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/2-openapi.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/2-openapi.md new file mode 100644 index 00000000..81c0321d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/2-openapi.md @@ -0,0 +1,1862 @@ +--- +title: OpenAPI +--- +## Informations + +### Version + +1.0 + +### Contact + +## Content negotiation + +### URI Schemes + +* http + +### Consumes + +* application/json +* multipart/form-data +* text/plain + +### Produces + +* application/json +* text/plain + +## All endpoints + +### cluster + +| Method | URI | Name | Summary | +| ------ | ------------------------------------ | ------------------------------------------------------------------------------------- | -------------------------------------------- | +| DELETE | /rest-api/v1/cluster/{clusterName} | [delete rest API v1 cluster cluster name](#delete-rest-api-v1-cluster-cluster-name) | Delete removes a cluster resource by name. | +| GET | /rest-api/v1/cluster/{clusterName} | [get rest API v1 cluster cluster name](#get-rest-api-v1-cluster-cluster-name) | Get returns a cluster resource by name. | +| GET | /rest-api/v1/clusters | [get rest API v1 clusters](#get-rest-api-v1-clusters) | List lists all cluster resources. | +| POST | /rest-api/v1/cluster/{clusterName} | [post rest API v1 cluster cluster name](#post-rest-api-v1-cluster-cluster-name) | Create creates a cluster resource. | +| POST | /rest-api/v1/cluster/config/file | [post rest API v1 cluster config file](#post-rest-api-v1-cluster-config-file) | Upload kubeConfig file for cluster | +| POST | /rest-api/v1/cluster/config/validate | [post rest API v1 cluster config validate](#post-rest-api-v1-cluster-config-validate) | Validate KubeConfig | +| PUT | /rest-api/v1/cluster/{clusterName} | [put rest API v1 cluster cluster name](#put-rest-api-v1-cluster-cluster-name) | Update updates the cluster metadata by name. | + +### debug + +| Method | URI | Name | Summary | +| ------ | ---------- | ------------------------------- | ---------------------------- | +| GET | /endpoints | [get endpoints](#get-endpoints) | List all available endpoints | + +### insight + +| Method | URI | Name | Summary | +| ------ | ----------------------------- | --------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/insight/audit | [get rest API v1 insight audit](#get-rest-api-v1-insight-audit) | Audit based on resource group. | +| GET | /rest-api/v1/insight/detail | [get rest API v1 insight detail](#get-rest-api-v1-insight-detail) | GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. 
| +| GET | /rest-api/v1/insight/events | [get rest API v1 insight events](#get-rest-api-v1-insight-events) | GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/score | [get rest API v1 insight score](#get-rest-api-v1-insight-score) | ScoreHandler calculates a score for the audited manifest. | +| GET | /rest-api/v1/insight/stats | [get rest API v1 insight stats](#get-rest-api-v1-insight-stats) | Get returns a global statistics info. | +| GET | /rest-api/v1/insight/summary | [get rest API v1 insight summary](#get-rest-api-v1-insight-summary) | Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/topology | [get rest API v1 insight topology](#get-rest-api-v1-insight-topology) | GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | + +### resourcegroup + +| Method | URI | Name | Summary | +| ------ | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | +| GET | /rest-api/v1/resource-groups/{resourceGroupRuleName} | [get rest API v1 resource groups resource group rule name](#get-rest-api-v1-resource-groups-resource-group-rule-name) | List lists all ResourceGroups by rule name. | + +### resourcegrouprule + +| Method | URI | Name | Summary | +| ------ | -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | +| DELETE | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [delete rest API v1 resource group rule resource group rule name](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name) | Delete removes a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [get rest API v1 resource group rule resource group rule name](#get-rest-api-v1-resource-group-rule-resource-group-rule-name) | Get returns a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rules | [get rest API v1 resource group rules](#get-rest-api-v1-resource-group-rules) | List lists all ResourceGroupRules. | +| POST | /rest-api/v1/resource-group-rule | [post rest API v1 resource group rule](#post-rest-api-v1-resource-group-rule) | Create creates a ResourceGroupRule. | +| PUT | /rest-api/v1/resource-group-rule | [put rest API v1 resource group rule](#put-rest-api-v1-resource-group-rule) | Update updates the ResourceGroupRule metadata by name. | + +### search + +| Method | URI | Name | Summary | +| ------ | ------------------- | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/search | [get rest API v1 search](#get-rest-api-v1-search) | SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. | + +## Paths + +### Delete removes a cluster resource by name. (*DeleteRestAPIV1ClusterClusterName*) + +``` +DELETE /rest-api/v1/cluster/{clusterName} +``` + +This endpoint deletes the cluster resource by name. 
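+
+As a rough usage sketch (the base URL and any authentication are placeholders, not defined by this document), the endpoint can be called with a plain HTTP client, for example in Go:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+)
+
+func main() {
+	// Placeholder base URL; substitute the address of your Karpor API server.
+	url := "https://karpor-server.example.com/rest-api/v1/cluster/example-cluster"
+
+	req, err := http.NewRequest(http.MethodDelete, url, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	// A 200 status indicates the cluster resource was removed; other codes
+	// are listed in the response table below.
+	fmt.Println(resp.Status)
+}
+```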
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------- | +| [200](#delete-rest-api-v1-cluster-cluster-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#delete-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#delete-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#delete-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#delete-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#delete-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#delete-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Delete removes a ResourceGroupRule by name. (*DeleteRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +DELETE /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint deletes the ResourceGroupRule by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------------------------------- | +| [200](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List all available endpoints (*GetEndpoints*) + +``` +GET /endpoints +``` + +List all registered endpoints in the router + +#### Consumes + +* text/plain + +#### Produces + +* text/plain + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------- | ------ | ----------------------------- | :---------: | ----------------------------------- | +| [200](#get-endpoints-200) | OK | Endpoints listed successfully | | [schema](#get-endpoints-200-schema) | + +#### Responses + +##### 200 - Endpoints listed successfully + +Status: OK + +###### Schema + +### Get returns a cluster resource by name. (*GetRestAPIV1ClusterClusterName*) + +``` +GET /rest-api/v1/cluster/{clusterName} +``` + +This endpoint returns a cluster resource by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------- | ------ | -------- | --------- | :------: | ------- | -------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#get-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#get-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#get-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#get-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#get-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#get-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all cluster resources. (*GetRestAPIV1Clusters*) + +``` +GET /rest-api/v1/clusters +``` + +This endpoint lists all cluster resources. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the cluster. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------ | --------------------- | ----------------------- | :---------: | ---------------------------------------------- | +| [200](#get-rest-api-v1-clusters-200) | OK | List of cluster objects | | [schema](#get-rest-api-v1-clusters-200-schema) | +| [400](#get-rest-api-v1-clusters-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-clusters-400-schema) | +| [401](#get-rest-api-v1-clusters-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-clusters-401-schema) | +| [404](#get-rest-api-v1-clusters-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-clusters-404-schema) | +| [405](#get-rest-api-v1-clusters-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-clusters-405-schema) | +| [429](#get-rest-api-v1-clusters-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-clusters-429-schema) | +| [500](#get-rest-api-v1-clusters-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-clusters-500-schema) | + +#### Responses + +##### 200 - List of cluster objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Audit based on resource group. (*GetRestAPIV1InsightAudit*) + +``` +GET /rest-api/v1/insight/audit +``` + +This endpoint audits based on the specified resource group. 
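+
+As an illustrative sketch only (the base URL is a placeholder, and the response is decoded generically rather than into the ScannerAuditData type referenced below), an audit for a single workload could be requested like this in Go, using the query parameters documented below:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	q := url.Values{}
+	q.Set("cluster", "example-cluster")
+	q.Set("apiVersion", "apps/v1")
+	q.Set("kind", "Deployment")
+	q.Set("namespace", "default")
+	q.Set("name", "foo")
+
+	// Placeholder base URL; substitute the address of your Karpor API server.
+	resp, err := http.Get("https://karpor-server.example.com/rest-api/v1/insight/audit?" + q.Encode())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	// Decode the audit payload generically and print it.
+	var audit map[string]interface{}
+	if err := json.NewDecoder(resp.Body).Decode(&audit); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(audit)
+}
+```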
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced scanning, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-audit-200) | OK | Audit results | | [schema](#get-rest-api-v1-insight-audit-200-schema) | +| [400](#get-rest-api-v1-insight-audit-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-audit-400-schema) | +| [401](#get-rest-api-v1-insight-audit-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-audit-401-schema) | +| [404](#get-rest-api-v1-insight-audit-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-audit-404-schema) | +| [429](#get-rest-api-v1-insight-audit-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-audit-429-schema) | +| [500](#get-rest-api-v1-insight-audit-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-audit-500-schema) | + +#### Responses + +##### 200 - Audit results + +Status: OK + +###### Schema + +[ScannerAuditData](#scanner-audit-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightDetail*) + +``` +GET /rest-api/v1/insight/detail +``` + +This endpoint returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml. 
Default to json | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-detail-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-insight-detail-200-schema) | +| [400](#get-rest-api-v1-insight-detail-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-detail-400-schema) | +| [401](#get-rest-api-v1-insight-detail-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-detail-401-schema) | +| [404](#get-rest-api-v1-insight-detail-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-detail-404-schema) | +| [405](#get-rest-api-v1-insight-detail-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-detail-405-schema) | +| [429](#get-rest-api-v1-insight-detail-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-detail-429-schema) | +| [500](#get-rest-api-v1-insight-detail-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-detail-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightEvents*) + +``` +GET /rest-api/v1/insight/events +``` + +This endpoint returns events for a Kubernetes resource YAML by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-events-200) | OK | List of events | | [schema](#get-rest-api-v1-insight-events-200-schema) | +| [400](#get-rest-api-v1-insight-events-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-events-400-schema) | +| [401](#get-rest-api-v1-insight-events-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-events-401-schema) | +| [404](#get-rest-api-v1-insight-events-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-events-404-schema) | +| [405](#get-rest-api-v1-insight-events-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-events-405-schema) | +| [429](#get-rest-api-v1-insight-events-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-events-429-schema) | +| [500](#get-rest-api-v1-insight-events-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-events-500-schema) | + +#### Responses + +##### 200 - List of events + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### ScoreHandler calculates a score for the audited manifest. (*GetRestAPIV1InsightScore*) + +``` +GET /rest-api/v1/insight/score +``` + +This endpoint calculates a score for the provided manifest based on the number and severity of issues detected during the audit. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced compute score, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ------------------------ | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-score-200) | OK | Score calculation result | | [schema](#get-rest-api-v1-insight-score-200-schema) | +| [400](#get-rest-api-v1-insight-score-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-score-400-schema) | +| [401](#get-rest-api-v1-insight-score-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-score-401-schema) | +| [404](#get-rest-api-v1-insight-score-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-score-404-schema) | +| [429](#get-rest-api-v1-insight-score-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-score-429-schema) | +| [500](#get-rest-api-v1-insight-score-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-score-500-schema) | + +#### Responses + +##### 200 - Score calculation result + +Status: OK + +###### Schema + +[InsightScoreData](#insight-score-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a global statistics info. (*GetRestAPIV1InsightStats*) + +``` +GET /rest-api/v1/insight/stats +``` + +This endpoint returns a global statistics info. 
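+
+As a minimal sketch, reusing the placeholder server address and token from the previous example, the global statistics could be fetched with:
+
+```shell
+# No query parameters are needed for the global statistics endpoint.
+curl -k -H "Authorization: Bearer $TOKEN" \
+  "https://127.0.0.1:7443/rest-api/v1/insight/stats"
+```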
+ +#### Produces + +* application/json + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ---------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-stats-200) | OK | Global statistics info | | [schema](#get-rest-api-v1-insight-stats-200-schema) | +| [400](#get-rest-api-v1-insight-stats-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-stats-400-schema) | +| [401](#get-rest-api-v1-insight-stats-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-stats-401-schema) | +| [404](#get-rest-api-v1-insight-stats-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-stats-404-schema) | +| [405](#get-rest-api-v1-insight-stats-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-stats-405-schema) | +| [429](#get-rest-api-v1-insight-stats-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-stats-429-schema) | +| [500](#get-rest-api-v1-insight-stats-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-stats-500-schema) | + +#### Responses + +##### 200 - Global statistics info + +Status: OK + +###### Schema + +[InsightStatistics](#insight-statistics) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightSummary*) + +``` +GET /rest-api/v1/insight/summary +``` + +This endpoint returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------- | +| [200](#get-rest-api-v1-insight-summary-200) | OK | Resource Summary | | [schema](#get-rest-api-v1-insight-summary-200-schema) | +| [400](#get-rest-api-v1-insight-summary-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-summary-400-schema) | +| [401](#get-rest-api-v1-insight-summary-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-summary-401-schema) | +| [404](#get-rest-api-v1-insight-summary-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-summary-404-schema) | +| [405](#get-rest-api-v1-insight-summary-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-summary-405-schema) | +| [429](#get-rest-api-v1-insight-summary-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-summary-429-schema) | +| [500](#get-rest-api-v1-insight-summary-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-summary-500-schema) | + +#### Responses + +##### 200 - Resource Summary + +Status: OK + +###### Schema + +[InsightResourceSummary](#insight-resource-summary) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightTopology*) + +``` +GET /rest-api/v1/insight/topology +``` + +This endpoint returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Force re-generating the topology, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------- | --------------------- | -------------------------------------------- | :---------: | ------------------------------------------------------ | +| [200](#get-rest-api-v1-insight-topology-200) | OK | map from string to resource.ResourceTopology | | [schema](#get-rest-api-v1-insight-topology-200-schema) | +| [400](#get-rest-api-v1-insight-topology-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-topology-400-schema) | +| [401](#get-rest-api-v1-insight-topology-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-topology-401-schema) | +| [404](#get-rest-api-v1-insight-topology-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-topology-404-schema) | +| [405](#get-rest-api-v1-insight-topology-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-topology-405-schema) | +| [429](#get-rest-api-v1-insight-topology-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-topology-429-schema) | +| [500](#get-rest-api-v1-insight-topology-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-topology-500-schema) | + +#### Responses + +##### 200 - map from string to resource.ResourceTopology + +Status: OK + +###### Schema + +map of [InsightResourceTopology](#insight-resource-topology) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a ResourceGroupRule by name. (*GetRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint returns a ResourceGroupRule by name. 
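+
+A hypothetical lookup of a rule named `namespace-rule` (the rule name, server address, and token are illustrative placeholders) might look like:
+
+```shell
+# Fetch a single ResourceGroupRule by its name (path parameter).
+curl -k -H "Authorization: Bearer $TOKEN" \
+  "https://127.0.0.1:7443/rest-api/v1/resource-group-rule/namespace-rule"
+```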
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroupRules. (*GetRestAPIV1ResourceGroupRules*) + +``` +GET /rest-api/v1/resource-group-rules +``` + +This endpoint lists all ResourceGroupRules. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the resourceGroupRule. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rules-200) | OK | List of resourceGroupRule objects | | [schema](#get-rest-api-v1-resource-group-rules-200-schema) | +| [400](#get-rest-api-v1-resource-group-rules-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rules-400-schema) | +| [401](#get-rest-api-v1-resource-group-rules-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rules-401-schema) | +| [404](#get-rest-api-v1-resource-group-rules-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rules-404-schema) | +| [405](#get-rest-api-v1-resource-group-rules-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rules-405-schema) | +| [429](#get-rest-api-v1-resource-group-rules-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rules-429-schema) | +| [500](#get-rest-api-v1-resource-group-rules-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rules-500-schema) | + +#### Responses + +##### 200 - List of resourceGroupRule objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroups by rule name. (*GetRestAPIV1ResourceGroupsResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-groups/{resourceGroupRuleName} +``` + +This endpoint lists all ResourceGroups. 
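+
+Assuming the same placeholder rule name, server address, and token as above, the groups produced by that rule could be listed with:
+
+```shell
+# List all ResourceGroups generated by the rule named 'namespace-rule'.
+curl -k -H "Authorization: Bearer $TOKEN" \
+  "https://127.0.0.1:7443/rest-api/v1/resource-groups/namespace-rule"
+```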
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------------------------------- | --------------------- | ----------------------------- | :---------: | ------------------------------------------------------------------------------ | +| [200](#get-rest-api-v1-resource-groups-resource-group-rule-name-200) | OK | List of resourceGroup objects | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-groups-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-groups-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-groups-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-groups-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-groups-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-groups-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - List of resourceGroup objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. (*GetRestAPIV1Search*) + +``` +GET /rest-api/v1/search +``` + +This endpoint returns an array of Kubernetes runtime Object matched using the query from context. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| -------- | ------- | ------ | -------- | --------- | :------: | ------- | ------------------------------------------------------ | +| page | `query` | string | `string` | | | | The current page to fetch. Default to 1 | +| pageSize | `query` | string | `string` | | | | The size of the page. Default to 10 | +| pattern | `query` | string | `string` | | ✓ | | The search pattern. Can be either sql or dsl. Required | +| query | `query` | string | `string` | | ✓ | | The query to use for search. 
Required | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------- | --------------------- | ----------------------- | :---------: | -------------------------------------------- | +| [200](#get-rest-api-v1-search-200) | OK | Array of runtime.Object | | [schema](#get-rest-api-v1-search-200-schema) | +| [400](#get-rest-api-v1-search-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-search-400-schema) | +| [401](#get-rest-api-v1-search-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-search-401-schema) | +| [404](#get-rest-api-v1-search-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-search-404-schema) | +| [405](#get-rest-api-v1-search-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-search-405-schema) | +| [429](#get-rest-api-v1-search-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-search-429-schema) | +| [500](#get-rest-api-v1-search-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-search-500-schema) | + +#### Responses + +##### 200 - Array of runtime.Object + +Status: OK + +###### Schema + +[][interface{}](#interface) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a cluster resource. (*PostRestAPIV1ClusterClusterName*) + +``` +POST /rest-api/v1/cluster/{clusterName} +``` + +This endpoint creates a new cluster resource using the payload. 
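+
+A hedged sketch of a create request, assuming the placeholder server and token above and using the field names of the `cluster.ClusterPayload` model described under Models (the actual kubeconfig content is elided):
+
+```shell
+# Register a cluster named 'example-cluster' by sending a JSON payload.
+curl -k -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
+  -d '{"displayName": "Example Cluster", "description": "demo cluster", "kubeconfig": "<kubeconfig-content>"}' \
+  "https://127.0.0.1:7443/rest-api/v1/cluster/example-cluster"
+```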
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#post-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#post-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#post-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#post-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#post-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#post-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Upload kubeConfig file for cluster (*PostRestAPIV1ClusterConfigFile*) + +``` +POST /rest-api/v1/cluster/config/file +``` + +Uploads a KubeConfig file for cluster, with a maximum size of 2MB. 
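+
+Since this endpoint consumes `multipart/form-data`, an upload might be sketched as follows, assuming a local file `./kubeconfig.yaml` and the placeholder server and token above:
+
+```shell
+# Upload a KubeConfig file (max 2MB) together with the cluster metadata form fields.
+curl -k -X POST -H "Authorization: Bearer $TOKEN" \
+  -F "name=example-cluster" \
+  -F "displayName=Example Cluster" \
+  -F "description=demo cluster" \
+  -F "file=@./kubeconfig.yaml" \
+  "https://127.0.0.1:7443/rest-api/v1/cluster/config/file"
+```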
+ +#### Consumes + +* multipart/form-data + +#### Produces + +* text/plain + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ---------- | ------ | --------------- | --------- | :------: | ------- | ---------------------------------- | +| description | `formData` | string | `string` | | ✓ | | cluster description | +| displayName | `formData` | string | `string` | | ✓ | | cluster display name | +| file | `formData` | file | `io.ReadCloser` | | ✓ | | Upload file with field name 'file' | +| name | `formData` | string | `string` | | ✓ | | cluster name | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-file-200) | OK | Returns the content of the uploaded KubeConfig file. | | [schema](#post-rest-api-v1-cluster-config-file-200-schema) | +| [400](#post-rest-api-v1-cluster-config-file-400) | Bad Request | The uploaded file is too large or the request is invalid. | | [schema](#post-rest-api-v1-cluster-config-file-400-schema) | +| [500](#post-rest-api-v1-cluster-config-file-500) | Internal Server Error | Internal server error. | | [schema](#post-rest-api-v1-cluster-config-file-500-schema) | + +#### Responses + +##### 200 - Returns the content of the uploaded KubeConfig file. + +Status: OK + +###### Schema + +[ClusterUploadData](#cluster-upload-data) + +##### 400 - The uploaded file is too large or the request is invalid. + +Status: Bad Request + +###### Schema + +##### 500 - Internal server error. + +Status: Internal Server Error + +###### Schema + +### Validate KubeConfig (*PostRestAPIV1ClusterConfigValidate*) + +``` +POST /rest-api/v1/cluster/config/validate +``` + +Validates the provided KubeConfig using cluster manager methods. 
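+
+A minimal sketch of a validation request, using the `kubeConfig` field of the `cluster.ValidatePayload` model (content elided, server and token are placeholders):
+
+```shell
+# Validate a kubeconfig before registering the cluster.
+curl -k -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
+  -d '{"kubeConfig": "<kubeconfig-content>"}' \
+  "https://127.0.0.1:7443/rest-api/v1/cluster/config/validate"
+```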
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | --------------------------------------------------- | ------------------------------- | --------- | :------: | ------- | ------------------------------ | +| request | `body` | [ClusterValidatePayload](#cluster-validate-payload) | `models.ClusterValidatePayload` | | ✓ | | KubeConfig payload to validate | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------------------------- | --------------------- | ---------------------------------- | :---------: | -------------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-validate-200) | OK | Verification passed server version | | [schema](#post-rest-api-v1-cluster-config-validate-200-schema) | +| [400](#post-rest-api-v1-cluster-config-validate-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-config-validate-400-schema) | +| [401](#post-rest-api-v1-cluster-config-validate-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-config-validate-401-schema) | +| [404](#post-rest-api-v1-cluster-config-validate-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-config-validate-404-schema) | +| [429](#post-rest-api-v1-cluster-config-validate-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-config-validate-429-schema) | +| [500](#post-rest-api-v1-cluster-config-validate-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-config-validate-500-schema) | + +#### Responses + +##### 200 - Verification passed server version + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a ResourceGroupRule. (*PostRestAPIV1ResourceGroupRule*) + +``` +POST /rest-api/v1/resource-group-rule +``` + +This endpoint creates a new ResourceGroupRule using the payload. 
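+
+A hedged example of creating a rule that groups resources by cluster and namespace, using the field names of the `resourcegrouprule.ResourceGroupRulePayload` model (all values are placeholders):
+
+```shell
+# Create a ResourceGroupRule named 'namespace-rule' that groups resources by the listed fields.
+curl -k -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
+  -d '{"name": "namespace-rule", "description": "group resources by namespace", "fields": ["cluster", "namespace"]}' \
+  "https://127.0.0.1:7443/rest-api/v1/resource-group-rule"
+```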
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-resource-group-rule-200-schema) | +| [400](#post-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-resource-group-rule-400-schema) | +| [401](#post-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-resource-group-rule-401-schema) | +| [404](#post-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-resource-group-rule-404-schema) | +| [405](#post-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-resource-group-rule-405-schema) | +| [429](#post-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-resource-group-rule-429-schema) | +| [500](#post-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the cluster metadata by name. (*PutRestAPIV1ClusterClusterName*) + +``` +PUT /rest-api/v1/cluster/{clusterName} +``` + +This endpoint updates the display name and description of an existing cluster resource. 
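+
+An update request could be sketched like the create call above, switching to `PUT` and sending only the metadata to change (placeholders throughout):
+
+```shell
+# Update the display name and description of the existing cluster 'example-cluster'.
+curl -k -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
+  -d '{"displayName": "Example Cluster (renamed)", "description": "updated description"}' \
+  "https://127.0.0.1:7443/rest-api/v1/cluster/example-cluster"
+```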
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#put-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#put-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#put-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#put-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#put-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#put-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#put-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the ResourceGroupRule metadata by name. (*PutRestAPIV1ResourceGroupRule*) + +``` +PUT /rest-api/v1/resource-group-rule +``` + +This endpoint updates the display name and description of an existing ResourceGroupRule. 
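+
+Similarly, an existing rule could be updated by switching the create call above to `PUT` (payload fields and values remain placeholders):
+
+```shell
+# Update the metadata of the existing ResourceGroupRule 'namespace-rule'.
+curl -k -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
+  -d '{"name": "namespace-rule", "description": "group resources by namespace and cluster", "fields": ["cluster", "namespace"]}' \
+  "https://127.0.0.1:7443/rest-api/v1/resource-group-rule"
+```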
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------------- | +| [200](#put-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-resource-group-rule-200-schema) | +| [400](#put-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-resource-group-rule-400-schema) | +| [401](#put-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-resource-group-rule-401-schema) | +| [404](#put-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-resource-group-rule-404-schema) | +| [405](#put-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-resource-group-rule-405-schema) | +| [429](#put-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-resource-group-rule-429-schema) | +| [500](#put-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +## Models + +### cluster.ClusterPayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------ | -------- | :------: | ------- | --------------------------------------------------------------- | ------- | +| description | string | `string` | | | ClusterDescription is the description of cluster to be created | | +| displayName | string | `string` | | | ClusterDisplayName is the display name of cluster to be created | | +| kubeconfig | string | `string` | | | ClusterKubeConfig is the kubeconfig of cluster to be created | | + +### cluster.UploadData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------------- | ------- | -------- | :------: | ------- | ----------- | ------- | +| content | string | `string` | | | | | +| fileName | string | `string` | | | | | +| fileSize | integer | `int64` | | | | | +| sanitizedClusterContent | 
string | `string` | | | | | + +### cluster.ValidatePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------- | ------ | -------- | :------: | ------- | ----------- | ------- | +| kubeConfig | string | `string` | | | | | + +### entity.ResourceGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------------- | ------------------- | :------: | ------- | ----------- | ------- | +| annotations | map of string | `map[string]string` | | | | | +| apiVersion | string | `string` | | | | | +| cluster | string | `string` | | | | | +| kind | string | `string` | | | | | +| labels | map of string | `map[string]string` | | | | | +| name | string | `string` | | | | | +| namespace | string | `string` | | | | | + +### insight.ResourceSummary + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| creationTimestamp | string | `string` | | | | | +| resource | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | +| resourceVersion | string | `string` | | | | | +| uid | string | `string` | | | | | + +### insight.ResourceTopology + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| children | []string | `[]string` | | | | | +| parents | []string | `[]string` | | | | | +| resourceGroup | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | + +### insight.ScoreData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------------------------------------------------------------------- | -------------- | ------------------ | :------: | ------- | ----------------------------------------------------------------------- | ------- | +| issuesTotal | integer | `int64` | | | IssuesTotal is the total count of all issues found during the audit. | | +| This count can be used to understand the overall number of problems | | | | | | | +| that need to be addressed. | | | | | | | +| resourceTotal | integer | `int64` | | | ResourceTotal is the count of unique resources audited during the scan. | | +| score | number | `float64` | | | Score represents the calculated score of the audited manifest based on | | +| the number and severity of issues. It provides a quantitative measure | | | | | | | +| of the security posture of the resources in the manifest. | | | | | | | +| severityStatistic | map of integer | `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective | | +| number of occurrences. It allows for a quick overview of the distribution | | | | | | | +| of issues across different severity categories. 
| | | | | | | + +### insight.Statistics + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------- | ------- | :------: | ------- | ----------- | ------- | +| clusterCount | integer | `int64` | | | | | +| resourceCount | integer | `int64` | | | | | +| resourceGroupRuleCount | integer | `int64` | | | | | + +### resourcegrouprule.ResourceGroupRulePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | -------- | ---------- | :------: | ------- | ----------- | ------- | +| description | string | `string` | | | | | +| fields | []string | `[]string` | | | | | +| name | string | `string` | | | | | + +### scanner.AuditData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | ------------------------------------------- | ---------------------- | :------: | ------- | ----------- | ------- | +| bySeverity | map of integer | `map[string]int64` | | | | | +| issueGroups | [][ScannerIssueGroup](#scanner-issue-group) | `[]*ScannerIssueGroup` | | | | | +| issueTotal | integer | `int64` | | | | | +| resourceTotal | integer | `int64` | | | | | + +### scanner.Issue + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------- | ------- | -------- | :------: | ------- | ------------------------------------------------------------------------------------- | ------- | +| message | string | `string` | | | Message provides a detailed human-readable description of the issue. | | +| scanner | string | `string` | | | Scanner is the name of the scanner that discovered the issue. | | +| severity | integer | `int64` | | | Severity indicates how critical the issue is, using the IssueSeverityLevel constants. | | +| title | string | `string` | | | Title is a brief summary of the issue. | | + +### scanner.IssueGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------------- | ----------------------------------------------- | ------------------------ | :------: | ------- | ----------- | ------- | +| issue | [ScannerIssue](#scanner-issue) | `ScannerIssue` | | | | | +| resourceGroups | [][EntityResourceGroup](#entity-resource-group) | `[]*EntityResourceGroup` | | | | | + +### unstructured.Unstructured + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------------------------- | ------------- | :------: | ------- | -------------------------------------------------------------------------------- | ------- | +| object | [interface{}](#interface) | `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or | | +| map[string]interface{} | | | | | | | +| children. | | | | | | | diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/3-search-methods.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/3-search-methods.md new file mode 100644 index 00000000..e1e63903 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/3-search-methods.md @@ -0,0 +1,109 @@ +--- +title: Search Methods +--- +Karpor is an open-source project that offers robust capabilities for searching resources across multiple clusters. 
This document outlines the two main search methods supported by Karpor: DSL (Domain Specific Language) and SQL (Structured Query Language), and explains how to utilize them for resource searches. + +## Keywords + +Karpor facilitates resource searches using two methods: DSL and SQL. Both methodologies leverage the following keywords for resource discovery: + +- cluster +- apiVersion +- kind +- namespace +- name +- creationTimestamp +- deletionTimestamp +- ownerReferences +- resourceVersion +- labels.`key` +- annotations.`key` +- content + +## SQL + +Karpor offers a SQL-like approach for querying Kubernetes resources, enabling users to employ SQL syntax for their searches. Below are examples illustrating the use of SQL syntax for various search scenarios: + +**Query resources of the Namespace kind** + +```sql +select * from resources where kind='Namespace' +``` + +**Query resources where the labels contain the key 'key1' with value 'value1'** + +```sql +select * from resources where labels.key1='value1' +``` + +**Query resources where the annotations contain the key 'key1' with value 'value1'** + +```sql +select * from resources where annotations.key1='value1' +``` + +**Query resources that are not of the Pod kind** + +```sql +select * from resources where kind!='Pod' +``` + +**Query resources of the Pod kind within a specific cluster** + +```sql +select * from resources where cluster='demo' and kind='Pod' +``` + +**Query resources of kind within a specified list** + +```sql +select * from resources where kind in ('pod','service') +``` + +**Query resources of kinds not within a specified list** + +```sql +select * from resources where kind not in ('pod','service') +``` + +**Query resources where the namespace starts with appl (where % represents any number of characters)** + +```sql +select * from resources where namespace like 'appl%' +``` + +**Query resources where the namespace contains banan (where \_ represents any single character)** + +```sql +select * from resources where namespace like 'banan_' +``` + +**Query resources where the namespace does not start with appl** + +```sql +select * from resources where namespace not like 'appl%' +``` + +**Query resources where the namespace does not contain banan** + +```sql +select * from resources where namespace notlike 'banan_' +``` + +**Query resources of kind Deployment and created before January 1, 2024, at 18:00:00** + +```sql +select * from resources where kind='Deployment' and creationTimestamp < '2024-01-01T18:00:00Z' +``` + +**Query resources of kind Service and order by creation timestamp in descending order** + +```sql +select * from resources where kind='Service' order by creationTimestamp desc +``` + +**Query resources whose content contains apple** + +```sql +select * from resources where contains(content, 'apple') +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/_category_.json new file mode 100644 index 00000000..1fd07096 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/5-references/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "References" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/6-roadmap/README.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/6-roadmap/README.md new file mode 100644 index 00000000..bd58b0bc --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.4/6-roadmap/README.md @@ -0,0 +1,18 
@@ +--- +title: 路线图 +--- +Karpor 是一个新兴的开源项目,我们致力于将其打造成一个小而美/厂商中立/开发者友好/社区驱动的开源项目🚀。未来,我们将重点放在以下几个领域: + +- 提升 Karpor 的**可用性**,降低入门门槛,确保其足够“用户友好”。 +- 加强 Karpor 的**可靠性**,确保其在生产环境中可信赖。 +- 深化与更多社区工具的**生态系统整合**,以确保开放性。 +- 探索 **AI + Karpor**,创造更多可能性。 +- 拥抱开源社区:我们热爱**开源精神**,如果你对开源感兴趣,那么从这里开始! +- ...... + +Karpor 遵循 [发布流程与节奏指南](../4-developer-guide/2-conventions/1-release-process.md),但行动可能不会严格遵守路线图。我们可能会根据社区会议的反馈和 [GitHub 问题](https://github.com/KusionStack/karpor/issues) 调整里程碑,期望所有社区成员加入讨论。关于最终决策,请参考 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones)。 + +以下是详细的路线图,我们将持续更新 ⬇️ + +- **2024 路线图**: [https://github.com/KusionStack/karpor/issues/273](https://github.com/KusionStack/karpor/issues/273) + diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5.json new file mode 100644 index 00000000..5d30926e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5.json @@ -0,0 +1,46 @@ +{ + "version.label": { + "message": "v0.5", + "description": "The label for version v0.5" + }, + "sidebar.karpor.category.Getting Started": { + "message": "入门", + "description": "The label for category Getting Started in sidebar karpor" + }, + "sidebar.karpor.category.Concepts": { + "message": "概念", + "description": "The label for category Concepts in sidebar karpor" + }, + "sidebar.karpor.category.User Guide": { + "message": "用户手册", + "description": "The label for category User Guide in sidebar karpor" + }, + "sidebar.karpor.category.How to Insight": { + "message": "如何洞察", + "description": "The label for category How to Insight in sidebar karpor" + }, + "sidebar.karpor.category.Best Production Practices": { + "message": "生产最佳实践", + "description": "The label for category Best Production Practices in sidebar karpor" + }, + "sidebar.karpor.category.Developer Guide": { + "message": "开发者手册", + "description": "The label for category Developer Guide in sidebar karpor" + }, + "sidebar.karpor.category.Contribution Guide": { + "message": "贡献指南", + "description": "The label for category Contribution Guide in sidebar karpor" + }, + "sidebar.karpor.category.Conventions": { + "message": "规约", + "description": "The label for category Conventions in sidebar karpor" + }, + "sidebar.karpor.category.References": { + "message": "参考手册", + "description": "The label for category References in sidebar karpor" + }, + "sidebar.karpor.category.CLI Commands": { + "message": "CLI Commands", + "description": "The label for category CLI Commands in sidebar karpor" + } +} \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/1-overview.mdx b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/1-overview.mdx new file mode 100644 index 00000000..665ed4f3 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/1-overview.mdx @@ -0,0 +1,419 @@ +--- +id: overview +title: 概览 +slug: / +--- + +import { + AiOutlineArrowRight, + AiFillCheckCircle, + AiFillCloseCircle, +} from "react-icons/ai"; +import logoImg from "@site/static/karpor/assets/logo/logo-full.png"; +import searchImg from "@site/static/karpor/assets/overview/search.png"; +import insightImg from "@site/static/karpor/assets/overview/insight.png"; +import visionImg from "@site/static/karpor/assets/overview/vision.png"; +import comingSoonImg from "@site/static/karpor/assets/misc/coming-soon.jpeg"; +import KarporButton from "@site/src/components/KarporButton"; 
+import GithubStar from "@site/src/components/GithubStars"; +import ReactPlayer from "react-player"; +import Typed from "typed.js"; + +export const FeatureBlock = ({ + title, + reverse = false, + imgSrc, + imgAlt, + children, +}) => { + const isMobile = typeof window !== "undefined" && window.innerWidth <= 768; + return ( + <> +

{title}

+
+
+ {imgAlt} +
+
{children}
+
+ + ); +}; + +export const Content = () => { + const karporVsOthers = [ + { + label: "用户界面", + karpor: true, + kubernetesDashboard: true, + labelDesc: "", + }, + { + label: "多集群", + karpor: true, + kubernetesDashboard: false, + labelDesc: "能够同时连接到多个集群", + }, + { + label: "聚合资源视图", + karpor: true, + kubernetesDashboard: false, + labelDesc: "人类友好的资源视图", + }, + { + label: "安全合规", + karpor: true, + kubernetesDashboard: false, + labelDesc: "自动扫描风险,评估健康分", + }, + { + label: "资源关系拓扑", + karpor: true, + kubernetesDashboard: false, + labelDesc: "洞察资源的上下文关系", + }, + ]; + const h2Style = { + paddingBottom: "14px", + borderBottom: "2px solid #f1f1f1", + fontSize: 28, + }; + const flexDirectionStyle = { + display: "flex", + flexDirection: "column", + alignItems: "center", + }; + // Setup typed animation + const el = React.useRef(null); + React.useEffect(() => { + const typed = new Typed(el.current, { + strings: [ + "帮助开发者快速定位资源", + "帮助管理员深入洞察集群", + "帮助平台和多集群建立连接", + ], + typeSpeed: 40, + backDelay: 1500, + loop: true, + }); + return () => { + // Destroy Typed instance during cleanup to stop animation + typed.destroy(); + }; + }, []); + return ( + <> +
+
+ +
+
+
+ +
+
+ +
+
+
+ Intelligence for Kubernetes ✨ +
+
+ +
+
+
+
+
+

📖 Karpor 是什么?

+
+ Karpor 是智能化的 Kubernetes 平台,它为 Kubernetes + 带来了高级的 🔍 搜索、💡 洞察和 ✨ AI 功能,本质上是一个 + Kubernetes 可视化工具。通过 + Karpor,您可以在任何云平台上获得对 Kubernetes + 集群的关键可见性。 +
+
+ 我们立志成为一个 + + 小而美、厂商中立、开发者友好、社区驱动 + + 的开源项目! 🚀 +
+
+
+
+ +
+
+
+
+

💡 为什么选择 Karpor?

+
+ +
+ ⚡️ 自动同步 +
+ 自动同步您在多云平台管理的任何集群中的资源 +
+
+ 🔍 强大灵活的查询 +
+ 以快速简单的方式有效地检索和定位跨集群的资源 +
+
+
+ +
+ 🔒 安全合规 +
+ 了解您在多个集群和合规标准中的合规性状态 +
+
+ 📊 资源拓扑 +
+ 提供包含资源运行上下文信息的关系拓扑和逻辑视图 +
+
+ 📉 成本优化 +
+ 即将推出 +
+
+
+ +
+ 💬 自然语言操作 +
+ 使用自然语言与 Kubernetes 交互,实现更直观的操作 +
+
+ 📦 情境化 AI 响应 +
+ 获得智能的、情境化的辅助,满足您的需求 +
+
+ 🤖 Kubernetes AIOps +
+ 利用 AI 驱动的洞察,自动化和优化 Kubernetes 管理 +
+
+
+
+
+
+
+

🌈 Our Vision

+
+ 现如今,Kubernetes + 生态系统日益复杂是一个不可否认的趋势,这一趋势越来越难以驾驭。这种复杂性不仅增加了运维的难度,也降低了用户采纳新技术的速度,从而限制了他们充分利用 + Kubernetes 的潜力。 +
+
+ 我们希望 Karpor 围绕着 🔍 搜索、📊 洞察和✨AI,击穿 + Kubernetes 愈演愈烈的复杂性,达成以下价值主张: +
+
+
+ +
+
+
+

🙌 Karpor vs. Kubernetes Dashboard

+
+ + {karporVsOthers?.map((item) => { + return ( +
+
+
{item?.label}
+ {item?.labelDesc && ( +
{item?.labelDesc}
+ )} +
+
+ {item?.karpor ? ( + + ) : ( + + )} +
+
+ {item?.kubernetesDashboard ? ( + + ) : ( + + )} +
+
+ ); + })} +
+

🎖️ 项目贡献者

+
+

感谢这些了不起的人! 🍻

+

+ 查看{" "} + 贡献指南, + 欢迎加入我们! 👇 +

+ +
+

👉 下一步

+
+ +
+ + ); +}; + + + + diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/2-installation.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/2-installation.md new file mode 100644 index 00000000..2de5eada --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/2-installation.md @@ -0,0 +1,170 @@ +--- +title: 安装 +--- + +## 使用 Helm 安装 + +如果您拥有 Kubernetes 集群,Helm 是推荐的安装方法。 + +以下教程将指导您使用 Helm 安装 Karpor,这将在命名空间 `karpor` 中以 `karpor-release` 为 Release 名称安装 Chart。 + +### 先决条件 + +* Helm v3+ +* Kubernetes 集群(最简单的方法是使用 `kind` 或 `minikube` 在本地部署 Kubernetes 集群) + +### 远程安装 + +首先,将 karpor chart 仓库添加到您的本地仓库。 + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +``` + +然后,您可以使用以下命令安装 Karpor 的最新版本。 + +```shell +helm install karpor-release kusionstack/karpor +``` + +![安装](./assets/2-installation/install.gif) + +**注意**:直接安装此 Chart 意味着它将使用 Karpor 的 [默认模板值](https://github.com/KusionStack/charts/blob/master/charts/karpor/values.yaml)。 + +如果将其部署到生产集群中,或者您想要自定义 Chart 配置,如 `resources`、`replicas`、`port` 等,您可以通过 `--set` 参数覆盖默认值。 + +Karpor Chart 的所有可配置参数都详细说明在 [这里](#chart-参数)。 + +```shell +helm install karpor-release kusionstack/karpor --set server.replicas=3 --set syncer.port=7654 +``` + +### 查看所有可用版本 + +您可以使用以下命令查看所有可安装的 Karpor Chart 版本。 + +```shell +helm repo update +helm search repo kusionstack/karpor --versions +``` + +### 升级到指定版本 + +您可以通过 `--version` 指定要升级的版本。 + +```shell +# 升级到最新版本 +helm upgrade karpor-release kusionstack/karpor + +# 升级到指定版本 +helm upgrade karpor-release kusionstack/karpor --version 1.2.3 +``` + +### 本地安装 + +如果您在生产中连接 [https://kusionstack.github.io/charts/](https://kusionstack.github.io/charts/) 有问题,您可能需要从 [这里](https://github.com/KusionStack/charts) 手动下载 Chart,并在本地使用它来安装或升级 Karpor 版本。 + +```shell +git clone https://github.com/KusionStack/charts.git +helm install karpor-release charts/karpor +helm upgrade karpor-release charts/karpor +``` + +### 卸载 + +卸载/删除命名空间 `karpor` 中的 `karpor-release` Helm Release: + +```shell +helm uninstall karpor-release +``` + +### 中国镜像代理 + +如果你在中国、并且从官方 DockerHub 上拉取镜像时遇到困难,那么你可以使用第三方的镜像代理服务: + +```shell +helm install karpor-release kusionstack/karpor --set registryProxy=docker.m.daocloud.io +``` + +**注意**: 以上只是一个样例,你可以根据需要替换 `registryProxy` 的值。 + +### Chart 参数 + +以下表格列出了 Chart 的所有可配置参数及其默认值。 + +#### 通用参数 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| namespace | string | `"karpor"` | 部署的目标命名空间 | +| namespaceEnabled | bool | `true` | 是否生成命名空间 | +| registryProxy | string | `""` | 镜像代理地址,配置后将作为所有组件镜像的前缀。 比如,`golang:latest` 将替换为 `/golang:latest` | + +#### 全局参数 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| global.image.imagePullPolicy | string | `"IfNotPresent"` | 应用于所有 Karpor 组件的镜像拉取策略 | + +#### Karpor Server + +Karpor Server 组件是主要的后端服务。它本身就是一个 `apiserver`,也提供 `/rest-api` 来服务 Web UI + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| server.image.repo | string | `"kusionstack/karpor"` | Karpor Server 镜像的仓库 | +| server.image.tag | string | `""` | Karpor Server 镜像的标签。如果未指定,则默认为 Chart 的 appVersion | +| server.name | string | `"karpor-server"` | Karpor Server 的组件名称 | +| server.port | int | `7443` | Karpor Server 的端口 | +| server.replicas | int | `1` | 要运行的 Karpor Server pod 的数量 | +| server.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Karpor Server pod 的资源规格 | 
+ +#### Karpor Syncer + +Karpor Syncer 组件是独立的服务,用于实时同步集群资源。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| syncer.image.repo | string | `"kusionstack/karpor"` | Karpor Syncer 镜像的仓库 | +| syncer.image.tag | string | `""` | Karpor Syncer 镜像的标签。如果未指定,则默认为 Chart 的 appVersion | +| syncer.name | string | `"karpor-syncer"` | karpor Syncer 的组件名称 | +| syncer.port | int | `7443` | karpor Syncer 的端口 | +| syncer.replicas | int | `1` | 要运行的 karpor Syncer pod 的数量 | +| syncer.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | karpor Syncer pod 的资源规格 | + +#### ElasticSearch + +ElasticSearch 组件用于存储同步的资源和用户数据。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| elasticsearch.image.repo | string | `"docker.elastic.co/elasticsearch/elasticsearch"` | ElasticSearch 镜像的仓库 | +| elasticsearch.image.tag | string | `"8.6.2"` | ElasticSearch 镜像的特定标签 | +| elasticsearch.name | string | `"elasticsearch"` | ElasticSearch 的组件名称 | +| elasticsearch.port | int | `9200` | ElasticSearch 的端口 | +| elasticsearch.replicas | int | `1` | 要运行的 ElasticSearch pod 的数量 | +| elasticsearch.resources | object | `{"limits":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"},"requests":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"}}` | karpor elasticsearch pod 的资源规格 | + +#### ETCD + +ETCD 组件是 Karpor Server 作为 `apiserver` 背后的存储。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| etcd.image.repo | string | `"quay.io/coreos/etcd"` | ETCD 镜像的仓库 | +| etcd.image.tag | string | `"v3.5.11"` | ETCD 镜像的标签 | +| etcd.name | string | `"etcd"` | ETCD 的组件名称 | +| etcd.port | int | `2379` | ETCD 的端口 | +| etcd.replicas | int | `1` | 要运行的 etcd pod 的数量 | +| etcd.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | karpor etcd pod 的资源规格 | + +#### Job + +这是一个一次性 Kubernetes Job,用于生成根证书和一些前置工作。Karpor Server 和 Karpor Syncer 都需要依赖它完成才能正常启动。 + +| 键 | 类型 | 默认值 | 描述 | +|-----|------|---------|-------------| +| job.image.repo | string | `"kusionstack/karpor"` | Job 镜像的仓库 | +| job.image.tag | string | `""` | Karpor 镜像的标签。如果未指定,则默认为 Chart 的 appVersion | + diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/3-quick-start.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/3-quick-start.md new file mode 100644 index 00000000..6d1d3afe --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/3-quick-start.md @@ -0,0 +1,143 @@ +--- +title: 快速开始 +--- +## 前提条件 + +* 确保已安装 [kubectl](https://kubernetes.io/docs/tasks/tools/)。 +* 确保已安装 [helm](https://helm.sh/docs/intro/install/)。 +* 如果你没有现成的集群,你仍然需要一个 [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation/)。 + +## 创建集群(可选) + +首先,如果你没有现成的集群,可以使用 `kind` 工具在本地环境中创建一个 Kubernetes 集群。按照以下步骤操作: + +1. 创建集群。你可以使用以下命令创建名为 `demo-cluster` 的集群: + ```shell + kind create cluster --name demo-cluster + ``` + + 这将在你的本地 Docker 环境中创建一个新的 Kubernetes 集群。稍等片刻,直到集群创建完成。 +2. 通过执行以下命令验证集群是否正常运行: + ```shell + kubectl cluster-info + ``` + + 如果一切设置正确,你将看到你的 Kubernetes 集群信息。 + +## 安装 + +要安装 Karpor,请在终端中执行以下命令: + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +更多的安装详情,请参考 [安装文档](2-installation.md)。 + +![安装](./assets/2-installation/install.gif) + +## 访问 Karpor Web 界面 + +1. 
运行以下命令来访问运行在集群中的 Karpor 服务: + ```shell + kubectl -n karpor port-forward service/karpor-server 7443:7443 + ``` + + 执行这条命令后,如果你访问本地机器上的 7443 端口,流量会被转发到 Kubernetes 集群中 karpor-server 服务的 7443 端口。 +2. 打开浏览器并输入以下 URL: + ```shell + https://127.0.0.1:7443 + ``` + +这将打开 Karpor 的 Web 界面。👇 + +![在浏览器中打开](./assets/2-installation/open-in-browser.gif) + +祝贺你!🎉 你已成功安装 Karpor。现在你可以开始使用 Karpor 探索和洞察多集群中的资源。 + +## 创建访问令牌 + +在注册集群之前,你需要创建一个访问令牌来登录 Karpor Web 界面。以下是创建令牌的简要步骤: + +1. 导出 Hub Cluster 的 KubeConfig: + +```shell +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +2. 创建 ServiceAccount 和 ClusterRoleBinding: + +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +``` + +3. 创建令牌: + +```shell +kubectl create token karpor-admin --duration=1000h +``` + +复制生成的令牌,稍后将用于登录 Karpor Web 界面。 + +有关创建令牌的详细说明,请参阅 [如何创建 Token](../3-user-guide/1-how-to-create-token.md) 文档。 + +## 注册集群 + +要向 Karpor 注册新集群,请按照以下步骤操作: + +1. 使用上一步创建的令牌登录 Karpor Web 界面。 +2. 打开 Karpor Web 界面中的 集群管理 部分。 +3. 点击 接入集群 按钮。 +4. 按照界面上的说明完成集群注册过程。 + +5. 在注册集群时,请注意以下事项: + + - 集群名称必须唯一且一旦创建不能更改。 + - 确保上传的集群证书中的 server 地址(目标集群地址)与 Karpor 之间有网络连通性。 + - 如果你在本地集群中部署了 Karpor,并希望注册该本地集群,则需要将集群证书中的 server 地址修改为集群内部地址 `https://kubernetes.default.svc.cluster.local:443`,以确保 Karpor 能够直接访问目标集群。 + - 如果要注册 EKS 集群,需要对 KubeConfig 进行额外的配置,包括添加 `env`、`interactiveMode` 和 `provideClusterInfo` 字段。详细步骤请参考 [多集群管理](../3-user-guide/2-multi-cluster-management.md) 文档中的 "注册 EKS 集群" 部分。 + +6. 完成上述步骤后,点击 验证并提交 按钮。 + +以下是 `注册集群` 页面的示例: + +![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) + +有关注册过程的更详细解释,请参阅 [多集群管理](../3-user-guide/2-multi-cluster-management.md) 指南。 + +## 搜索资源 + +Karpor 提供了一个强大的搜索功能,允许你快速跨集群查找资源。要使用此功能: + +1. 打开 Karpor Web 界面中的 搜索 页面。 +2. 
输入你要查找的资源的搜索条件。 + +以下是 `搜索` 页面的示例: + +![](/karpor/assets/search/search-auto-complete.png) +![](/karpor/assets/search/search-result.png) + +要了解更多关于搜索功能以及如何有效使用它们的说明,请查看 [搜索方法](../5-references/3-search-methods.md) 指南。 + +## 资源洞察 + +通过点击搜索结果,你可以进入到资源的**洞察**页面,在这里你可以查看资源风险报告、健康分、资源关系拓扑图等经过我们提炼的信息。 + +以下是 `洞察` 页面的示例: + +![](/karpor/assets/insight/insight-home.png) +![](/karpor/assets/insight/insight-single-issue.png) +![](/karpor/assets/insight/insight-topology.png) + +## 结论 + +请注意,本指南仅提供 Karpor 的快速入门,你可能需要参考其他文档和资源来深入地了解每个功能。 + +## 下一步 + +- 了解 Karpor 的 [架构](../concepts/architecture) 和 [术语表](../concepts/glossary)。 +- 查看 [用户指南](../user-guide/multi-cluster-management) 以了解 Karpor 的更多功能。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/assets/2-installation/install.gif b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/assets/2-installation/install.gif new file mode 100644 index 00000000..68889793 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/assets/2-installation/install.gif differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/assets/2-installation/open-in-browser.gif b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/assets/2-installation/open-in-browser.gif new file mode 100644 index 00000000..00adfb18 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/1-getting-started/assets/2-installation/open-in-browser.gif differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/1-architecture.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/1-architecture.md new file mode 100644 index 00000000..018f5f3f --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/1-architecture.md @@ -0,0 +1,24 @@ +--- +title: 架构 +--- +![](assets/1-architecture/architecture.png) + +## 组件 + +- `Dashboard`:Karpor 的 Web UI 界面。 +- `Server`:Karpor 的核心后端服务。 +- `Syncer`:用于实时同步集群资源的独立服务。 +- `Storage`:用于存储已同步的资源和用户数据的存储后端。 + +## Karpor 的工作原理 + +1. 安装后,用户可以将感兴趣的集群注册到 Karpor 中。 +2. Syncer 组件会自动将已注册集群中的资源实时同步到 Storage 中,同时会确保资源的实时变化也会自动同步到 Storage 中。 +3. 当用户需要查找特定资源时,只需在 Dashboard 的搜索框中输入查询语句。Dashboard 会与 Server 的搜索接口交互,Server 内的搜索模块将解析这些语句,并在 Storage 中查找相应的资源,然后将搜索结果返回给 Dashboard。 +4. 点击搜索结果后,用户将被引导至资源洞察页面。Dashboard 调用 Server 的洞察接口,其中 Server 的洞察模块对资源进行静态扫描,生成问题报告,并定位其相关资源,以绘制包含所有父资源和子资源的资源拓扑图。 +5. 
洞察页面同样适用于资源组,比如洞察特定 Group-Version-Kind 的资源组、单个命名空间,或是用户自定义的资源组。 + +## 下一步 + +- 学习 Karpor 的 [术语表](../concepts/glossary)。 +- 查看 [用户指南](../user-guide/multi-cluster-management) 以了解更多关于你能够通过 Karpor 实现的内容。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/3-glossary.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/3-glossary.md new file mode 100644 index 00000000..b63a52bd --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/3-glossary.md @@ -0,0 +1,56 @@ +--- +title: 术语表 +--- +## 集群 + +等同于 `Kubernetes` 中的集群概念,例如名为 `democluster` 的集群。 + +`Karpor` 可以管理多个集群,包括集群注册、证书轮换、生成和查看洞察,以及通过 Dashboard 进行的其他操作。它还支持使用 `Karpor` 发放的统一证书,通过 `kubectl` 和 `kubectx` 等命令行工具访问任何被管理的集群。 + +更多细节,请参考最佳实践:[告别集群证书切换,让你“一卡通行”](../3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md)。 + +## Hub Cluster + +管理其他集群的集群。由于 Karpor 本身也是一个 Kubernetes Apiserver,在这个特殊的集群中我们注册了一些自定义资源用于管理集群元数据、资源回流策略等,这个特殊的集群我们叫它 Hub Cluster,和托管的用户集群区分开。 + +## Managed Cluster + +泛指被 Hub Cluster 管理的集群,一般是托管在 Karpor 中的用户集群。 + +## 资源 + +等同于 `Kubernetes` 中的资源概念,如名为 `mockDeployment` 的 `Deployment`。 + +`Karpor` 对其管理集群中的资源进行实时同步、搜索和洞察。资源是 `Karpor` 里搜索和洞察的最小粒度对象。 + +## 资源组 + +**资源组是一种逻辑上的组织结构**,用于将相关的 `Kubernetes` 资源组合起来,以便于更直观的查看、搜索和洞察。例如,可以创建一个名为 `mockapp` 的 `Application` 资源组,其中包括一个 `Namespace`、一个 `Deployment` 和多个具有特定标签(如 `app.kubernetes.io/name: mockapp`)的 `Pods`。 + +## 资源组规则 + +**资源组规则是一套规则**,将特定资源分组到适当的资源组中。这些规则旨在基于 `annotations`、`labels`、`namespace` 等属性,将资源组织成逻辑单元。例如,要定义一个应用程序资源组规则,可以指定 `annotations` 为 `app.kubernetes.io/name` 作为分组条件。 +`Karpor` 预设了一个资源组规则 `Namespace` 以及自定义资源组规则。 + +![](assets/3-glossary/image-20240326171327110.png) + +## 拓扑 + +在 `Karpor` 中,拓扑是指**给定资源组内相关资源之间的关系和依赖**。利用可视化的拓扑图可以更容易地查看和理解资源组的内部结构,这对于故障排查和定位问题很有帮助。 + +## 审计 + +审计是指**对给定资源组内的所有资源执行合规性扫描**。其目的是帮助用户发现潜在风险。当前系统内置使用的扫描工具和规则,但我们将来会支持自定义方式进行扫描。 + +## 问题 + +**审计的输出被称为问题**。如果被扫描对象没有问题,则审计结果将为空。否则,所有识别到的风险将根据其风险等级进行分类并显示,包括每个风险的描述、相关资源等,用来指导用户解决问题,确保集群资源的安全和合规。 + +## 健康分 + +评分用于反映资源组或资源的**整体健康状况**,提醒用户及时调整和采取措施。健康评分是基于资源组的审计结果计算得出。影响评分的因素包括:**风险等级**、**风险数量**和**资源总数**。 + +## 下一步 + +- 学习 Karpor 的 [架构](../concepts/architecture)。 +- 查看 [用户指南](../user-guide/multi-cluster-management),了解更多有关你可以通过 Karpor 实现的内容。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/assets/1-architecture/architecture.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/assets/1-architecture/architecture.png new file mode 100644 index 00000000..afec9346 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/assets/1-architecture/architecture.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/assets/3-glossary/image-20240326171327110.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/assets/3-glossary/image-20240326171327110.png new file mode 100644 index 00000000..f5673eb8 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/2-concepts/assets/3-glossary/image-20240326171327110.png differ diff --git 
a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/1-how-to-create-token.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/1-how-to-create-token.md new file mode 100644 index 00000000..20557175 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/1-how-to-create-token.md @@ -0,0 +1,82 @@ +--- +title: 如何创建 Token +--- +在这篇文档中,你将了解如何使用 token 访问 Karpor dashboard。 + +[Hub Cluster](../2-concepts/3-glossary.md#hub-cluster) 采用了与 Kubernetes 相同的基于角色的访问控制(RBAC)机制。这意味着,要访问 Hub Cluster,用户需要在 Hub Cluster 上创建 ClusterRole、ServiceAccount,以及相应的 ClusterRoleBinding 来将两者绑定。为了提升用户体验,我们预设了两种 ClusterRole:karpor-admin 和 karpor-guest。karpor-admin 角色拥有在面板上执行所有操作的权限,包括但不限于添加或删除集群、创建资源组等;而 karpor-guest 角色则仅限于在面板上进行查看操作。随着对 Karpor 的深入了解,用户可以根据自身需求,创建额外的 ClusterRole,实现更细致的权限管理。 + +## 导出 Hub Cluster 的 KubeConfig + +由于 Hub Cluster 需要 KubeConfig 进行验证,可以通过以下命令一键导出用于访问 Hub Cluster 的 KubeConfig。 +```shell +# 以下操作在安装 Karpor 的 Kubernetes 集群中运行 +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +**注意**:确保本地可访问 Hub Cluster KubeConfig 中的 server 地址。默认为集群内部地址 (https://karpor-server.karpor.svc:7443),本地无法直接连接。如在本地部署 Karpor,需将 karpor-server 服务转发至本地 7443 端口,并将 server 地址改为 `https://127.0.0.1:7443`。 + +你可以使用以下 sed 命令将 Hub 集群证书中的访问地址更改为本地地址: + +对于 MacOS/BSD 系统(需要在 `-i` 后添加 `''`): +```shell +sed -i '' 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +对于 Linux/GNU 系统(仅需要 `-i`): +```shell +sed -i 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +对于 Windows 系统: +请手动修改 kubeconfig 文件中的服务器地址。 + +## 将 Hub Cluster 的服务转发到本地 + +在本节中,我们假设你将 Karpor 部署在了本地集群(比如用 kind 或者 minikube 创建的集群)。 + +如上节所说,为了在本地访问 Hub Cluster,你需要将 karpor-server 的服务转发到本地。如果你使用了其他方法进行了转发,可以跳过这一步。这里使用简单的 port-forward 进行转发,打开另一个终端,运行: + +```shell +# 以下操作在安装 Karpor 的 Kubernetes 集群中运行 +kubectl -n karpor port-forward svc/karpor-server 7443:7443 +``` + +## 为你的用户创建 ServiceAccount 和 ClusterRoleBinding + +本节将指导你如何在 Hub Cluster 中创建 karpor-admin 和 karpor-guest 用户,并为它们分配相应的 ClusterRoleBinding。以下是具体的操作步骤: + +首先,指定 kubectl 连接的目标集群为 Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +然后,我们将创建两个常用的身份:管理员(karpor-admin)和访客(karpor-guest)。这个过程包括创建 ServiceAccount 并将其绑定到相应的 ClusterRole: + +```shell +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +kubectl create serviceaccount karpor-guest +kubectl create clusterrolebinding karpor-guest --clusterrole=karpor-guest --serviceaccount=default:karpor-guest +``` + +## 为你的用户创建 Token + +以下操作需在 Hub Cluster 中执行,请确保已正确设置 kubectl 连接到 Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +默认情况下,token 的有效期为 1 小时。如果你需要长期使用的 token,可以在生成时指定更长的过期时间。例如: +```shell +kubectl create token karpor-admin --duration=1000h +``` + +默认参数下, token 的最长有效期为 8760h(1 年)。如果你需要修改这个最长有效期,可以在 karpor-server 的启动参数中添加 `--service-account-max-token-expiration={MAX_EXPIRATION:h/m/s}`。 + +**注意**:创建 token 需要 v1.25.0 或更高版本的 kubectl 。 + +## 开始安全地使用 Karpor + +复制刚刚生成的 token,粘贴到 Karpor dashboard 的 token 输入框中, 点击登录。 + +在安全环境下开启你的 Karpor 之旅吧! 
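
如果想在登录 dashboard 之前先确认 token 的权限是否生效,可以按下面的方式做一个快速校验(仅为示意,`clusters` 资源名以你的 Karpor 版本实际注册的资源为准):

```shell
# 以下操作针对 Hub Cluster 执行
export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig

# 生成 token 并保存到变量,便于后续粘贴到 dashboard
TOKEN=$(kubectl create token karpor-admin --duration=1000h)
echo "$TOKEN"

# 使用该 token 发起一次只读权限检查,验证 RBAC 绑定是否正确
kubectl --token="$TOKEN" auth can-i list clusters
```
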
diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/2-multi-cluster-management.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/2-multi-cluster-management.md new file mode 100644 index 00000000..750379d9 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/2-multi-cluster-management.md @@ -0,0 +1,94 @@ +--- +title: 多集群管理 +--- +多集群管理是将集群注册进 Karpor 的入口,使能在大量集群间进行搜索和洞察。 + +## 注册集群 + +1. 点击 集群管理 标签页。 +2. 点击 注册集群 按钮。 + ![](/karpor/assets/cluster-mng/cluster-mng-empty.png) +3. 添加集群名字。集群名称必须唯一且一旦创建不能更改。 +4. 上传该集群的 KubeConfig 文件(一个具有读权限的文件就足够了)。 +5. 点击 验证并提交 按钮。 + ![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) +6. 一旦验证通过,集群将会被添加到 集群管理 页面。 + ![](/karpor/assets/cluster-mng/cluster-mng-register-success.png) + +**注意**:请确保上传的集群证书中的 server 地址(目标集群地址)与 Karpor 之间 的网络连通性。举例来说,如果你在本地集群中部署了 Karpor,并希望注册该本地集群,则需要将集群证书中的 server 地址修改为集群内部地址 `https://kubernetes.default.svc.cluster.local:443`,以确保 Karpor 能够直接访问目标集群。 + +### 注册 EKS 集群 + +如果你想注册 EKS 集群,那么需要对 KubeConfig 进行一些额外的操作: + +1. 导出 EKS 集群的 KubeConfig。例如,通过如下 aws 命令可以获得指定集群的 KubeConfig: + +```shell +aws eks --region update-kubeconfig --name --kubeconfig= +``` + +2. 在导出的 KubeConfig 文件中的 `users/exec` 中添加 `env`、`interactiveMode` 和 `provideClusterInfo` 字段。可以参考以下的 KubeConfig 结构: + +```yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: CA + server: SERVER + name: CLUSTER +contexts: +- context: + cluster: CLUSTER + user: USER + name: CONTEXT +current-context: CONTEXT +kind: Config +preferences: {} +users: +- name: USER + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + args: + - --region + - ap-southeast-1 + - eks + - get-token + - --cluster-name + - mycluster3 + - --output + - json + command: aws + ### 以下字段需要补充到 KubeConfig 中 + env: + - name: AWS_ACCESS_KEY_ID + value: + - name: AWS_SECRET_ACCESS_KEY + value: + - name: AWS_DEFAULT_REGION + value: + - name: AWS_DEFAULT_OUTPUT + value: json + interactiveMode: IfAvailable + provideClusterInfo: false +``` + +3. 在 [注册集群](#%E6%B3%A8%E5%86%8C%E9%9B%86%E7%BE%A4) 中使用修改后的 KubeConfig。 + +## 编辑集群 + +编辑 按钮允许修改 显示名称描述,从而改变仪表盘中集群名称和描述的显示方式。 + +![](/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png) + +## 轮换证书 + +当 KubeConfig 过期时,你可以通过点击 轮换证书 来更新证书。 +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png) + +## 移除集群 + +通过 删除 按钮方便地移除已注册的集群。 +![](/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/3-search.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/3-search.md new file mode 100644 index 00000000..55828ac6 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/3-search.md @@ -0,0 +1,34 @@ +--- +title: 如何搜索 +--- +在本节中,我们将探索如何使用 Karpor 执行多集群资源搜索,本指南完全通过 Dashboard 进行。 + +我们支持三种搜索方法: + +- **通过 SQL 搜索**:使用 SQL 查询语言执行资源搜索。 +- **通过 DSL 搜索**:通过 `Karpor` 的特定领域语言(DSL)进行资源搜索。 +- **通过自然语言搜索**:使用自然语言进行资源搜索。 + +## 通过 SQL 搜索 + +Karpor 提供了一个方便的 SQL 查询功能,允许你使用熟悉的 SQL 语法搜索和过滤所有托管集群中的 Kubernetes 资源,并为多集群资源搜索提供了针对性的优化和增强。 + +SQL 是软件工程行业从业者容易获取的技能之一,理论上使得学习曲线相当低。因此,这种搜索方法是为你准备的!特别适合 Karpor 的初学者。 + +以下是使用 SQL 搜索的步骤: + +1. **进入搜索页面**:我们将首页设计为搜索的入口点,因此打开 `Karpor` 的 Web UI 立即呈现给你搜索页面。 + ![](/karpor/assets/search/search-home.png) +2. 
**编写 SQL 查询语句**:使用 SQL 语法编写你的查询语句,指定你希望搜索的集群名称、资源类型、条件和过滤器。此外,如果你输入关键词并按空格,搜索框将弹出带有下拉菜单的自动完成提示,建议你可以输入的下一个可能的关键词。 + ![](/karpor/assets/search/search-auto-complete.png) +3. **执行查询**:点击 `搜索` 按钮执行查询,并被发送到搜索结果页面。Karpor 将返回与 SQL 查询匹配的资源列表。 + ![](/karpor/assets/search/search-result.png) +4. **高级功能**:利用我们的内置高级 SQL 语法,如排序、全文搜索等,进一步细化你的搜索。详情请参阅:[搜索方法文档](../5-references/3-search-methods.md)。 + +## 通过 DSL 搜索 + +敬请期待。🚧 + +## 通过自然语言搜索 + +敬请期待。🚧 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md new file mode 100644 index 00000000..3ac1df52 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md @@ -0,0 +1,27 @@ +--- +title: 检查任何资源组和资源 +--- +在这部分内容中,我们将通过清晰的步骤和实例详细解释如何使用 Karpor 来检查任何资源组或资源。 + +如果你不熟悉相关概念,可以参考 [术语表](../../2-concepts/3-glossary.md) 章节。 + +## 检查具体资源 + +1. 搜索你感兴趣的资源: + ![](/karpor/assets/search/search-home.png) +2. 在搜索结果页,所有通过条件筛选的资源将会被列出: + ![](/karpor/assets/search/search-result.png) +3. 点击任意资源名称,即可跳转到该资源的洞察页面: + ![](/karpor/assets/insight/insight-home.png) + +## 检查具体资源组 + +你可能已经注意到,在每一个搜索结果条目中,资源的 `Cluster`、`Kind`、`Namespace` 等标签都列了出来。请注意,这些标签是**超链接**,我们称之为 "**锚点**"。它们代表了指向特定资源组或资源的链接。通过点击这些**锚点**,你可以快速跳转到该资源组或资源的洞察页面。 + +![](/karpor/assets/search/search-result.png) + +## 在资源组 / 资源间灵活切换 + +实际上,除了前述搜索结果中的标签外,在任何页面上看到的任何资源 / 资源组名称,都可以作为**锚点**重定向,就像是时空虫洞,允许你在任何维度之间来回穿梭,直到找到你正在搜索的资源。搜索和锚点都是加速检索的手段,它们是 Karpor 作为 Kubernetes 探索器的关键特性。 + +![](/karpor/assets/insight/insight-breadcrumbs.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/2-custom-resource-group.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/2-custom-resource-group.md new file mode 100644 index 00000000..e009d79d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/2-custom-resource-group.md @@ -0,0 +1,92 @@ +--- +title: 自定义资源组 +--- +## 创建自定义资源组 + +本节将重点介绍如何在 Karpor 中创建自定义资源组。通过自定义资源组,你可以根据自己的需求和逻辑概念,在 Karpor 中灵活管理和组织资源。我们将逐步指导你创建和定义自定义资源组,并展示如何使用这些组进行资源洞察和管理。 + +如果你不熟悉**资源组**和**资源组规则**相关概念,可以参考 [词汇表](../../2-concepts/3-glossary.md) 部分。 + +**假设**在你的组织或公司内,有一个 `应用单元` 的概念,代表**某个环境中应用的所有资源**。 + +我们在**标签中标记应用的名称和环境**。例如,以下是 `生产环境` 中 `mock-apple` 的 `应用单元`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: mock-apple + name: mock-apple +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + template: + metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + fruit: apple + spec: + containers: + - image: nginx:latest + name: mock-container + dnsPolicy: ClusterFirst + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + name: mock-service-apple-prod + namespace: mock-apple +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + type: ClusterIP +``` + +现在,我们将按照以下步骤创建一个名为 `应用单元` 的自定义 
`资源组规则`。它将根据用户指定的规则对集群中的所有资源进行分类,并列出所有符合规则的 `资源组`。 + +1. 点击 洞察 标签进入洞察首页。 +2. 在页面底部,你将看到一个默认的资源组规则 `命名空间`,这是按命名空间分类的单一规则。 + ![](/karpor/assets/insight/insight-homepage.png) +3. 点击创建资源组按钮 +,并在弹出窗口中填入 `应用单元` 的**基本信息和分类规则**。 + ![](/karpor/assets/insight/insight-create-app-resource-group-rule.png) +4. 点击 提交 按钮,然后点击新出现的 应用单元 标签,列出所有应用单元。 + ![](/karpor/assets/insight/insight-list-app-resource-groups.png) +5. 你可以在搜索框中输入关键词,快速找到 `生产` 环境中的 `mock-apple` 应用单元。 + ![](/karpor/assets/insight/insight-search-app-resource-group.png) +6. 你可以点击资源组卡片上的 查看 按钮,跳转到相应的 `资源组洞察页面`,查看某个应用单元的所有资源、拓扑关系、合规报告等聚合信息。 +7. 如有需要,你也可以使用相同的步骤创建 `环境资源组`。 + ![](/karpor/assets/insight/insight-create-env-resource-group-rule.png) + ![](/karpor/assets/insight/insight-list-env-resource-groups.png) + +## 编辑自定义资源组 + +你可以点击自定义资源组选项卡右侧的按钮 来修改弹出窗口中的基本信息和分类规则。 + +![](/karpor/assets/insight/insight-edit-env-resource-group.png) + +## 删除自定义资源组 + +你可以点击自定义资源组标签右侧的按钮 然后在弹出窗口中点击 删除,以删除当前资源组规则。 + +![](/karpor/assets/insight/insight-delete-env-resource-group.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/3-summary.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/3-summary.md new file mode 100644 index 00000000..639426ff --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/3-summary.md @@ -0,0 +1,23 @@ +--- +title: 概览 +--- +在本节中,我们将了解 Karpor 洞察页面上的 `概览卡片`,它们用于快速查看和理解当前资源组或资源的关键指标。 + +在不同的资源组下,`概览卡片` 显示的内容也可能有所不同。 + +如果你查看的是: + +1. **资源组洞察页面**: + + 1. **集群洞察页面**,概览卡片显示的是集群的**节点、Pod 数量、CPU、内存容量以及 Kubernetes 版本**。 + ![](/karpor/assets/insight/insight-summary-cluster.png) + 2. **资源种类洞察页面**,概览卡片显示的是**所属集群、GVK(Group Version Kind)信息,以及当前集群下该类型资源的数量**。 + ![](/karpor/assets/insight/insight-summary-kind.png) + 3. **命名空间洞察页面**,概览卡片显示的是**所属集群、命名空间,以及当前命名空间下最丰富的资源类型**。 + ![](/karpor/assets/insight/insight-summary-namespace.png) + 4. **自定义资源组洞察页面**,概览卡片显示的是**每个规则的关键值,以及当前资源组下的几个资源统计数据**。 + ![](/karpor/assets/insight/insight-summary-custom-resource-group.png) +2. **资源洞察页面**,概览卡片显示的是**当前资源的名称、GVK 信息、所属集群和命名空间**。 + ![](/karpor/assets/insight/insight-summary-resource.png) + +⚠️ **注意**:无论你处于哪个资源组洞察页面,概览卡片总会显示一个健康评分,该评分基于实体的风险合规状态计算得出。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/4-compliance-report.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/4-compliance-report.md new file mode 100644 index 00000000..68b60d87 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/4-compliance-report.md @@ -0,0 +1,16 @@ +--- +title: 合规报告 +--- +本节将介绍合规扫描功能,主要用于检测和评估当前资源或资源组中的所有资源是否符合特定的合规标准和安全政策。在本节中,你将了解如何有效利用合规扫描功能以确保集群和资源的安全与合规。 + +如果你不熟悉**合规报告**或**风险**相关概念,你可以参考 [术语表](../../2-concepts/3-glossary.md) 章节。 + +1. 按照 [检查任何资源组和资源](#%E6%A3%80%E6%9F%A5%E4%BB%BB%E4%BD%95%E8%B5%84%E6%BA%90%E7%BB%84%E5%92%8C%E8%B5%84%E6%BA%90) 的指引,导航至特定资源组 / 资源的洞察页面。 +2. 你可以看到资源的**合规报告**卡片。 + ![](/karpor/assets/insight/insight-home.png) +3. 该卡片显示了对当前资源或资源组下所有资源进行扫描时识别出的**风险**,按风险等级分类。在每个风险等级标签下,风险按发生频率从高到低排序。每个风险条目显示标题、描述、发生次数以及发现问题的扫描工具。 +4. 点击特定风险将显示一个弹出窗口,展示风险的详细信息。 + ![](/karpor/assets/insight/insight-single-issue.png) +5. 点击 查看所有风险 ,将弹出一个抽屉,列出所有风险。这里,你可以搜索、分类、分页等。 + ![](/karpor/assets/insight/insight-all-issues.png) +6. 
一旦你按照其指示解决了一个风险,可以点击 重新扫描 按钮,这将触发对资源组下所有资源进行全面的合规扫描。一旦扫描完成,仪表板将显示新的结果。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/5-topology.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/5-topology.md new file mode 100644 index 00000000..1bbaedd5 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/5-topology.md @@ -0,0 +1,19 @@ +--- +title: 拓扑结构 +--- +## 拓扑结构 + +在本节中,我们将探索 Karpor 中的拓扑功能。拓扑视图将帮助你更直观地理解集群中各种资源之间的关系和依赖。以下是如何使用拓扑视图。 + +1. 按照 [检查任意资源组和资源](#%E6%A3%80%E6%9F%A5%E4%BB%BB%E4%BD%95%E8%B5%84%E6%BA%90%E7%BB%84%E5%92%8C%E8%B5%84%E6%BA%90) 的指引,导航至特定资源组 / 资源的洞察页面。 +2. 在页面底部,你可以看到资源拓扑图。 + ![](/karpor/assets/insight/insight-topology.png) +3. 根据当前页面情况: + 1. 资源洞察页面: + 1. 该图将展示与当前资源相关的上游和下游资源。例如,如果当前资源是一个 Deployment(部署),拓扑图将展示 Deployment 下的 ReplicaSet(副本集)以及 ReplicaSet 下的 Pods(容器组)。 + ![](/karpor/assets/insight/insight-topology-example.png) + 2. 点击资源拓扑图中的一个节点,等同于点击特定资源的锚点,这将直接导航至该资源的洞察页面。 + 2. 资源组洞察页面: + 1. 该图将直观显示当前资源组下各种资源类型的数量与关系。 + 2. 点击资源拓扑图中的一个节点,等同于点击资源类型,下方列表将刷新显示当前资源组中特定类型下的所有资源。 + ![](/karpor/assets/insight/insight-linkage.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/_category_.json new file mode 100644 index 00000000..c39e5397 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "How to Insight" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/index.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/index.md new file mode 100644 index 00000000..9cec8507 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/4-insight/index.md @@ -0,0 +1,6 @@ +--- +title: 如何洞察 +--- +在本节中,我们将介绍如何使用 Karpor 对集群内的资源进行全面洞察。你可以通过多种方式访问洞察页面,并且可以轻松地在不同范围(如集群、类型、命名空间或单个资源)的洞察页面之间切换。如果你当前组织内有特定领域的逻辑范围,你甚至可以通过设置资源组规则来自定义资源组(如应用程序、环境等)。我们还提供功能以便对这些自定义资源组进行洞察。 + +本指南将完全在 Karpor 仪表板上操作。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md new file mode 100644 index 00000000..44f92637 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md @@ -0,0 +1,42 @@ +--- +title: 告别集群证书切换,让你“一卡通行” +--- +## 挑战与需求 + +### 大规模多集群的规模挑战 + +2014 年 6 月,Kubernetes 从 Google 的内部 Borg 项目诞生,引人注目地亮相。在科技巨头的支持和一个蓬勃发展的开源社区的帮助下,它逐渐成为了容器编排领域的事实标准。随着公司开始在生产环境中部署 Kubernetes,单个 Kubernetes 集群无法再满足内部日益复杂的需求。单个集群中的节点数量超过社区推荐的限制(5,000)是很常见的,使得扩展到多集群成为一个自然的选择。 + +### 多集群访问者的基本需求 + +随着多集群的蓬勃发展,各种平台可能需要跨不同集群访问资源,需要获取每个集群的 KubeConfig。 + +随着用户和集群数量的增加,集群管理员面临着巨大的时间成本:如果有 `M` 个集群和 `N` 个用户,管理 KubeConfig 的时间复杂度将变为 `O (M*N)`。此外,用户在访问不同集群时需要切换不同的 KubeConfig,不同集群的 KubeConfig 对应的权限也各不相同,无疑增加了使用的复杂度。 + +![直接连接:用户需要维护多个 KubeConfigs](assets/1-one-pass-with-proxy/image-20240326163622363.png) + +在这种情况下,有没有一种方法能方便地访问不同集群中的资源,而无需维护大量的 KubeConfig 和管理跨集群的各种用户权限问题?此外,这种方法理想地应该是云原生的,可以通过 kubectl 和 Kubernetes 官方客户端访问,以降低过渡到使用这种方法的成本。`Karpor` 的出现就是为了解决这些问题。 + +## "一站式通行" 的理念 + +我们开发了 `Karpor`,一个开源项目。作为一个 Kubernetes 
资源探索器,在搜索和洞察集群资源方面具有独特优势,它的基础多集群管理组件,具备集群证书颁发和多集群请求代理的特点,使其高度适合作为平台对多个集群的统一访问入口。该组件支持以云原生方式转发用户请求到指定集群,允许用户维护一套 KubeConfig 来访问不同的集群,使访问多集群像访问单个集群一样简单。那么,它是如何工作的呢?下面,我们介绍 `Karpor` 的架构和功能。 + +![使用多集群网关:用户只需要维护一套 KubeConfigs](assets/1-one-pass-with-proxy/image-20240326164141400.png) + +### 多集群请求路由和代理 + +`Karpor` 包含一个应用层网关,能够将任何 Kubernetes 风格的请求转发给指定的 Kubernetes 集群。`Karpor` 也是基于 Kubernetes 框架开发的,作为 kube-apiserver,可以独立运行或作为现有 kube-apiserver 的扩展。`Karpor` 支持处理两种类型的扩展资源:`Cluster` 和 `Cluster/Proxy`,前者用于存储集群信息,后者用于将用户请求转发到特定集群。用户可以通过 Kubernetes 官方 CLI(`kubectl`)或 SDK(`client-go`、`client-java` 等)进行访问。 + +`Karpor` 将所有对 `Cluster/Proxy` 子资源的访问代理到目标集群。例如,要从 `Cluster1` 集群检索 Pod 信息,用户需要向 `Karpor` 发送 `GET /apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods` 请求。`Karpor` 将从 `Cluster/Cluster1` 资源生成一个 KubeConfig 以访问该集群,并将 `/api/v1/pods` 请求代理到 `Cluster1` 集群。 + +![使用 kubectl 和 karpor 证书访问任何管理的集群](assets/1-one-pass-with-proxy/image-20240326165247891.png) + +### 支持所有 Kubernetes 原生请求 + +`Karpor` 支持转发所有 kube-apiserver 请求。具体来说,`Karpor` 是一个应用层网关,通过 HTTP connect 协议代理 HTTP 请求。除了支持对资源的 `get`、`create`、`update` 和 `delete` 操作外,它还支持 `watch`、`log`、`exec`、`attach` 等。(由于用于 `exec` 和 `attach` 的 SPDY 协议不支持 http2,`Karpor` 在转发这些请求时会禁用 http2,切换到 http1.1 并支持 hijacker 处理)。 + +![](assets/1-one-pass-with-proxy/image-20240326165632158.png) + +## 总结 + +从上文中可以看出,利用 `Karpor` 的多集群管理组件,为用户提供了一个可控权限的 “多集群通行证”。用户不再需要关心频繁切换集群证书和新集群的接入等问题。有了这个“一证通行”,访问多个集群的成本降低了,满足了大多数用户在多集群平台上的最基本需求。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/_category_.json new file mode 100644 index 00000000..82dd90e3 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Best Production Practices" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png new file mode 100644 index 00000000..ab8051fe Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png new file mode 100644 index 00000000..de950079 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png new file mode 100644 index 00000000..27fffb47 Binary files /dev/null and 
b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png new file mode 100644 index 00000000..99053c68 Binary files /dev/null and b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png differ diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/_category_.json new file mode 100644 index 00000000..8f01ba26 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/3-user-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/1-non-code-contribute.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/1-non-code-contribute.md new file mode 100644 index 00000000..0c723628 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/1-non-code-contribute.md @@ -0,0 +1,40 @@ +--- +title: 非代码贡献指南 +--- +你可以用以下任何感兴趣的方式贡献。 + +## 贡献用户示例和 Demo + +* 如果你正在使用 Karpor,最简单的贡献方式就是 [向社区表达感谢](https://github.com/KusionStack/karpor/issues/343)。 + +## 报告漏洞 + +在提交新的 issue 之前,请确保该问题没有被提交过。 + +检查 [Issue 列表](https://github.com/KusionStack/karpor/issues) 是否有类似 issue。 + +通过 [报告漏洞](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug-report.yaml) 提交漏洞报告,确保你提供了足够的信息帮助复现该漏洞。 + +遵循下面的 issue 模板并且添加额外信息来帮助我们复现该问题。 + +## 安全性 issue + +如果你确信发现了安全漏洞,请阅读我们的 [安全策略](https://github.com/KusionStack/karpor/blob/main/SECURITY.md) 获取更多细节。 + +## 提议增强特性 + +如果你有提升 Karpor 的好点子,请提交 [特性请求](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Ffeature&projects=&template=enhancement.yaml)。 + +## 回答问题 + +如果你有疑问并且在 [文档](https://www.kusionstack.io/karpor/) 中找不到答案,下一步是在 [GitHub 论坛](https://github.com/KusionStack/karpor/discussions) 中提问。 + +帮助这些用户对我们很重要,我们很需要你的帮助。你可以通过回答 [他们的问题](https://github.com/KusionStack/karpor/discussions) 来帮助其他的 Karpor 用户。 + +## 贡献文档 + +贡献文档需要一些提交 pull request 到 Github 的知识,按照下面的指南这将会非常简单。 + +* [kusionstack.io 开发者指南](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +查看 [开源指南](https://opensource.guide/how-to-contribute/) 获取更多贡献方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/2-code-contribute.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/2-code-contribute.md new file mode 100644 index 00000000..018f8ec4 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/2-code-contribute.md @@ -0,0 +1,174 @@ +--- +title: 代码贡献指南 +--- +在本代码贡献指南,你将会了解下列内容: + +- [如何在本地运行 Karpor](#%E5%A6%82%E4%BD%95%E5%9C%A8%E6%9C%AC%E5%9C%B0%E8%BF%90%E8%A1%8C-karpor) +- [如何创建拉取请求(pull request)](#%E5%88%9B%E5%BB%BA%E6%8B%89%E5%8F%96%E8%AF%B7%E6%B1%82pull-request) +- 
[代码审查指导规则](#%E4%BB%A3%E7%A0%81%E5%AE%A1%E6%9F%A5) +- [Pull request 格式指南](#pull-request-%E6%A0%BC%E5%BC%8F%E6%8C%87%E5%8D%97) +- [更新文档和网站](#%E6%9B%B4%E6%96%B0%E6%96%87%E6%A1%A3%E5%92%8C%E7%BD%91%E7%AB%99) + +## 如何在本地运行 Karpor + +本指南将会帮助你开始 Karpor 开发。 + +### 前提条件 + +* Golang 版本 1.19+ + +
+ 安装 Golang + +1. 从 [官方网站](https://go.dev/dl/) 安装 golang 1.19+。解压二进制文件并放置到某个位置,假设该位置是 home 目录下的 `~/go/`,下面是一个示例命令,你应当选择适合你系统的正确二进制文件。 + +``` +wget https://go.dev/dl/go1.20.2.linux-amd64.tar.gz +tar xzf go1.20.2.linux-amd64.tar.gz +``` + +如果你想在本地开发环境维护多个 golang 版本,你可以下载包并解压到某个位置,比如 `~/go/go1.19.1`,然后根据下面的命令相应地改变路径。 + +1. 为 Golang 设置环境变量 + +``` +export PATH=~/go/bin/:$PATH +export GOROOT=~/go/ +export GOPATH=~/gopath/ +``` + +如果 `gopath` 目录不存在,可以使用 `mkdir ~/gopath` 创建。这些命令将会把 go 二进制文件所在的目录添加到 `PATH` 环境变量 (让其成为 go 命令的优先选择)并且设置 `GOROOT` 环境到该 go 目录。如果将这些行添加到你的 `~/.bashrc` or `~/.zshrc` 文件,你就不用每次打开新的终端时设置这些环境变量。 + +1. (可选) 在一些地区,例如中国大陆,连接到默认的 go 仓库可能会很慢;你可以配置 GOPROXY 来加速下载过程。 + +``` +go env -w GOPROXY=https://goproxy.cn,direct +``` + +
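
安装完成后,可以用下面的命令快速确认 Go 环境是否配置正确:

```shell
go version
go env GOROOT GOPATH GOPROXY
```
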
+ +* Kubernetes 版本 v1.20+ ,且配置文件保存在 `~/.kube/config`。 +* golangci-lint 版本 v1.52.2+, 通过运行 `make lint` 可以自动安装,如果自动安装失败,你可以手动安装。 + +
+ 手动安装 golangci-lint + +你可以根据 [安装指南](https://golangci-lint.run/welcome/install)手动安装,或者使用以下命令: + +``` +cd ~/go/ && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.52.2 +``` + +
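
安装完成后,可以通过以下命令确认 golangci-lint 已在 PATH 中并且版本符合要求(示例):

```shell
golangci-lint --version
```
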
+ +### 构建 + +- 克隆项目 + +```shell +git clone git@github.com:KusionStack/karpor.git +``` + +- 本地构建 + +执行 `make build-all` 将会为所有平台创建可执行文件;如果你只想为特定平台构建,执行 `make build-${PlatformName}`,例如 `make build-darwin`。查看所有可用命令,执行 `make help`。 + +### 测试 + +为了保证代码质量,编写测试代码是必不可少的,你可以在项目根目录运行以下命令执行单元测试: + +```shell +make test +``` + +如果你需要生成额外的覆盖率报告,执行: + +```shell +make cover +``` + +接下来你可以执行下列命令,来从浏览器中阅读测试覆盖率报告: + +```shell +make cover-html +``` + +## 创建拉取请求(Pull Request) + +我们很高兴你考虑为 Karpor 项目作出贡献! + +本文档将会指导你完成 [创建拉取请求](./index.md#contribute-a-pull-request) 的过程。 + +### 在你开始之前 + +我们知道你对于创建第一个 pull request 非常兴奋。在我们开始之前,请确保你的代码符合相关的 [代码规约](../2-conventions/2-code-conventions.md)。 + +### 你的第一个 Pull Request + +在提交你的 PR 之前,运行下面的命令并确保它们都成功了: + +``` +make test +make lint +``` + +如果这是你第一次向 Github 上的开源项目贡献,请确保你已经阅读了 [创建拉取请求](https://docs.github.com/zh/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request)。 + +为了提高你的 pull request 被接受的机会,请确保你的 pull rquest 符合以下指导规则: + +- 标题和描述与实现相符。 +- pull request 中的所有 commit 都符合 [格式指南](#Formatting-guidelines)。 +- pull request 会关闭一个相关 issue。 +- pull request 包含了必要的测试,以验证预期行为。 +- 如果你的 pull request 有冲突,请将你的分支 rebase 到 main 分支。 + +如果 pull request 修复了一个漏洞: + +- pull request 的描述中必须包含 `Closes #` 或 `Fixes #`。 +- 为了避免回归问题,pull request 必须包含验证该漏洞被修复的测试。 + +## 代码审查 + +一旦你创建了一个 pull requset,下一步就是让其他人审查你的改动。代码审查对审查者和 pull request 作者都是很好的学习机会。 + +如果你觉得某个特定的人应当审查你的 pull request,你可以在描述或评论中标记他们。 +通过输入 `@` 符号和其用户名来标记用户。 + +我们建议你阅读 [如何进行代码审查](https://google.github.io/eng-practices/review/reviewer/) 来了解更多关于代码审查的知识。 + +## Pull request 格式指南 + +精心编写的 pull request 可以最大程度地缩短你的更改被接受的时间。这些指南将帮助你为 pull requset 编写条理清晰的提交消息和说明。 + +### Commit 信息格式 + +了解更多:[Commit 规约](../2-conventions/4-commit-conventions.md) + +### Pull Request 标题 + +在接受 pull request 时,Karpor 团队会将所有的 commit 合并为一个。 + +Pull request 的标题将会成为合并后的 commit 信息的描述。 + +我们仍然鼓励贡献者撰写详细的 commit 信息,因为它们将会作为 git commit 正文的一部分。 + +我们在生成发布更新日志时将会使用 pull request 的标题。因此,我们会努力使标题尽可能具有信息量。 + +确保你的 pull request 标题使用与 commit 信息相同的格式。如果不遵循该格式,我们将会在该 pull request 添加 `title-needs-formatting` 标签。 + +### 通过所有 CI 检查 + +在合并之前,所有的测试 CI 都应该通过: + +- 覆盖率不应该下降。当前,pull request 的覆盖率应当至少为 70%。 +- Karpor 使用 **CLA** 作为贡献者协议。它要求你在第一次合并 pull request 之前签署。 + +## 更新文档和网站 + +如果你的 pull request 被合并了,而且它引入了新的特性或增强,你需要更新文档并且提交 pull requset 到 [kusionstack.io](https://github.com/KusionStack/kusionstack.io) 仓库。 + +根据下面的文档了解如何编写文档: + +- [kusionstack.io 开发者指南](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +太棒了,你已经完成了代码贡献的整个生命周期! diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/3-roles.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/3-roles.md new file mode 100644 index 00000000..206d98ea --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/3-roles.md @@ -0,0 +1,36 @@ +--- +title: 角色 +--- +感谢您对本开源项目的关注和支持!本文档将阐述贡献者在项目中的角色、职责以及如何从 Contributor 升级为 Maintainer,以及 Maintainer 降级为 Contributor 的规则。我们希望通过这份文档,让每位贡献者都能清楚地了解自己的成长路径,并为项目的发展做出更大的贡献。 + +## 贡献者角色及职责 + +在本开源项目中,我们主要设有两个贡献者角色:Contributor 和 Maintainer。 +以下是对这两个角色的简要介绍: + +1. Contributor:项目的贡献者,可以是代码贡献者、文档贡献者、测试贡献者等。Contributor 为项目提供了宝贵的资源,帮助项目不断完善和发展。 +2. 
Maintainer:项目的维护者,负责项目的日常维护工作,包括审查和合并 PR、处理 Issue、发布版本等。Maintainer 是项目的核心成员,对项目的发展方向和决策具有重要的影响力。 + +## Contributor 升级为 Maintainer + +我们非常欢迎每位 Contributor 为项目的发展做出贡献,并鼓励 Contributor 向 Maintainer 的角色发展。 +以下是从 Contributor 升级为 Maintainer 的条件: + +1. 持续贡献:Contributor 需要在一段时间内(例如 3 个月)持续为项目贡献代码、文档或其他资源。这表明 Contributor 对项目的关注度和热情。 +2. 质量保证:Contributor 提交的代码或文档等资源需要保持较高的质量,符合项目的规范要求,并对项目产生积极的影响。 +3. 积极参与:Contributor 需要积极参与到项目的讨论和决策中来,为项目的发展提供建设性的意见和建议。 +4. 团队协作:Contributor 需要具备良好的团队协作精神,能够与其他贡献者和 Maintainer 友好沟通,共同解决问题。 +5. 责任担当:Contributor 需要具备一定的责任心,愿意承担项目维护的部分工作,包括审查 PR、处理 Issue 等。 + +当 Contributor 满足以上条件时,现有的 Maintainer 将会对其进行评估,如果达到 Maintainer 的要求,将会邀请其成为新的 Maintainer。 + +## Maintainer 降级为 Contributor + +Maintainer 在项目中承担了重要的职责,我们希望每位 Maintainer 都能够保持对项目的关注和热情。 +然而,我们也理解每个人的时间和精力是有限的,因此,当 Maintainer 无法继续履行职责时,将会降级为 Contributor: + +1. 长时间不活跃:如果 Maintainer 在一段时间内(例如 3 个月)没有参与项目的维护工作,包括审查 PR、处理 Issue 等,将被视为不活跃。 +2. 质量问题:如果 Maintainer 在项目中的工作出现严重的质量问题,导致项目的发展受到影响,将被视为不符合 Maintainer 的要求。 +3. 团队协作问题:如果 Maintainer 在与其他贡献者和 Maintainer 的协作过程中出现严重的沟通问题或团队协作问题,如不尊重他人意见、频繁产生冲突、拒绝协作等,影响到项目的正常运作和氛围,将被视为不符合 Maintainer 的要求。 +4. 违反规定:如果 Maintainer 违反了项目的规定或行为准则,包括但不限于泄露敏感信息、滥用权限等,将被视为不符合 Maintainer 的要求。 +5. 主动申请:如果 Maintainer 由于个人原因无法继续履行职责,可以主动申请降级为 Contributor。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/_category_.json new file mode 100644 index 00000000..09eab23b --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Contribution Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/index.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/index.md new file mode 100644 index 00000000..57c4cca4 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/1-contribution-guide/index.md @@ -0,0 +1,117 @@ +# 贡献指南 + +贡献指南介绍了如何参与社区发展和向社区贡献。 + +为了帮助我们为所有人建立安全和积极的社区体验,我们要求所有的参与者遵守 CNCF 社区 [行为准则](https://github.com/cncf/foundation/blob/main/code-of-conduct-languages/zh.md)。 + +## 开始贡献之前 + +### 找到一个贡献点 + +有多种方式对 Karpor 贡献,包括代码和非代码贡献,我们对任何人对社区的任何方式的努力都非常感谢。 + +这里是一些示例: + +* 贡献代码仓库和文档。 +* 报告和分类 issue。 +* 在你的地区组织会议和用户群组。 +* 回答 Karpor 相关问题帮助别人。 + +并且: + +- 如果你不知道如何开始,我们准备了一份 [新手任务清单 | Community tasks 🎖︎](https://github.com/KusionStack/karpor/issues/463),或者你可以通过 issue 跟踪器过滤 [help wanted | 需要帮助](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) 或 [good first issue | 新手任务](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue++label%3A%22good+first+issue%22) 标签. 
你可以从任何感兴趣的 issue 开始。 +- 如果你有任何问题,欢迎 [提交 Issue](https://github.com/KusionStack/karpor/issues/new/choose) 或者 [发帖讨论](https://github.com/KusionStack/karpor/discussions/new/choose),我们会尽快回答。 + +### 如何进行非代码贡献 + +我们认为对社区存续和未来发展而言,非代码贡献和代码贡献同样重要。 + +- 参考 [非代码贡献指南](./non-code-contribute) 获取更多细节 + +### 如何进行代码贡献 + +不确定从哪里开始向 Karpor 代码库贡献?可以从浏览带有 `good first issue` 或 `help wanted` 标签的 issue 开始。 + +- [Good first issue | 新手任务](https://github.com/KusionStack/karpor/labels/good%20first%20issue) 通常很容易解决的任务。 +- [Help wantet | 需要帮助](https://github.com/KusionStack/karpor/labels/help%20wanted) 和复杂程度无关, 我们希望能够在社区解决的问题。 +- 参考 [代码贡献指南](./code-contribute) 获取更多细节。 + +学习 [代码规约](../conventions/code-conventions) 和 [测试规约](../conventions/test-conventions),并了解在写代码时要注意的地方。 + +然后阅读 [发布流程与节奏指南](../conventions/release-process),了解你的代码什么时候会发布。 + +## 贡献一个拉取请求(Pull Request) + +在打开或者认领 issue 之后,你可以通过提交一个拉取请求(Pull Request)为 karpor 进行代码或非代码贡献。这里是你应该遵循的一些步骤: + +### Fork 仓库 + +Karpor 遵循主干开发模式,也就是说,用于发布的代码维护在 main 分支。 + +那么,为了开发 Karpor,你需要从 [karpor](https://github.com/KusionStack/karpor) Fork 一个项目到你自己的工作空间,然后检出一个新的分支用于开发代码。 + +### 开发代码和非代码 + +现在你可以开始解决 issue 。为了维护 Karpor 的代码质量,提交 PR 之后,一些必要的检查会被触发。 + +开发结束之后,你需要 commit 代码然后将代码 push 到你 fork 出的仓库。由于 PR 的标题将作为 commit message,你的 PR 标题需要符合 [commit 规约](../2-conventions/4-commit-conventions.md)。 + +以下是一些简单的解释: + +PR 的标题需要按照以下结构组织: + +``` +<类型>[可选 范围]: <描述> + +[可选 正文] +``` + +要求中的类型可以帮助更好地确认这次提交的范围,基于 [Angular 指南](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines)。 + +我们使用小写的 `<类型>`,以避免在大小写敏感的问题上浪费时间。`<类型>` 可以是以下之一: + +``` +feat: 新特性 +fix: 漏洞修复 +docs: 仅文档改动 +build: 关于构建系统和外部依赖的改动 +style: 不影响代码含义的改动(如空行、格式、缺少分号等) +refactor: 不属于漏洞修复或者增加特性的代码改动 +perf: 提升性能的代码改动 +test: 增加缺少的测试用例或者修正现有的测试用例 +chore: 构建过程或辅助工具和库(如文档生成)的修改 +``` + +### 打开一个拉取请求(Pull Request) + +[打开一个拉取请求(Pull Request)](https://github.com/KusionStack/karpor/pulls):打开一个从你 fork 的仓库的开发分支到 karpor main 分支的拉取请求(Pull Request)。你需要清楚地描述你的 PR 做了什么,并且链接到一个 issue。除此之外,PR 的标题应该按照前面提到的 commit 规约,并且长度在 5-256 个字符之间,不允许使用 `WIP` 和 `[WIP]` 前缀。 + +### 签署贡献者许可协议(Contributor License Agreement,CLA) + +如果这是你的第一个 PR ,你需要签署我们的 [CLA(贡献者许可协议)](https://github.com/KusionStack/.github/blob/main/CLA.md)。 你唯一需要做的事情的是在当前 PR 按以下格式发表评论: + +`I have read the CLA Document and I hereby sign the CLA` + +如果你的 CLA 签署失败了,可能有以下原因: + +* 评论的格式必须与上面完全一致,例如不能有额外的空格、空行等。 +* git commit 的作者和 Karpor PR 的作者必须一致。 + +### PR 检查 + +为了维持 karpor 项目的可靠性,以下检查将会自动触发: + +* 单元测试 +* Golang 代码风格检查 +* Commit 风格检查 +* PR 标题检查 +* 代码许可证检查 +* Markdown 格式检查 + +请确保你的 PR 通过这些检查。 + +## 成为社区成员 + +如果你对成为社区成员感兴趣或者想了解更多关于治理的内容,请查看 [角色](./3-roles.md) 获取更多细节。 + +在 Karpor 的世界中享受编码和协作吧! 
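
下面给出一个符合上文 commit 规约的提交示例(分支名与提交信息仅为演示):

```shell
# 在 fork 出的仓库中检出开发分支
git checkout -b feat/cluster-label-filter

# 提交信息遵循 "<类型>[可选 范围]: <描述>" 的结构
git commit -m "feat(cluster): support filtering clusters by label"

# 推送到自己的远程仓库后,即可向 main 分支发起 Pull Request
git push origin feat/cluster-label-filter
```
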
diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/1-release-process.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/1-release-process.md new file mode 100644 index 00000000..0454cc00 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/1-release-process.md @@ -0,0 +1,49 @@ +--- +title: 发布流程和节奏 +--- +## 发布计划 + +我们将通过 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones) 建立并持续根据发布计划。每个发布里程碑将包括两类任务: + +- Maintainer 承诺完成的任务。Maintainer 会在根据他们的时间和精力承诺下次发布要实现的特性。通常,这些任务会经过离线讨论并添加到里程碑。这些任务会被分配给计划实施和测试它们的 Maintainer。 +- Contributor 提出的额外事项,通常是不紧急的特性和优化。Maintainer 不承诺在当前 release 周期内完成,但承诺会对这些社区提交进行代码审查。 + +里程碑会清晰地描述最终要的特性和期望完成日期。这将清楚地告知终端用户下一版本的发布时间和内容。 + +除了下一次里程碑之外,我们也会维护未来几个发布里程碑的草稿。 + +## 发布标准 + +- 所有的 **官方发布** 都应该在 `main` 分支添加标签,并且携带类似 `alpha`、 `beta`、 `rc` 的可选先行版本后缀,例如,一个通常的官方发布版本可能是 `v1.2.3`、 `v1.2.3-alpha.0`。例如,如果我们想要在发布正式版本 `v1.2.3` 之前进行一些验证,我们可以先发布类似 `v1.2.3-alpha.0` 的先行版本,在验证完成之后再发布 `v1.2.3` 版本。 +- Maintainer 承诺完成特定的特性和增强,由 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones) 跟踪。 +- 我们会尽可能防止发布延期;如果一个特性无法按时完成,它将会被挪到下次发布。 +- **每月** 发布一个新版本。 + +## 发布标准流程 + +Maintainer 负责推动发布过程并遵循标准操作程序以确保发布的质量。 + +1. 为指定发布的 git commit 添加标签并推到上游;该标签需要满足[语义化版本控制](#%E8%AF%AD%E4%B9%89%E5%8C%96%E7%89%88%E6%9C%AC%E6%8E%A7%E5%88%B6)。 +2. 确保触发的 Github Action 流水线执行成功。一旦成功,将会自动触发一次 Github 发布,其中包括根据提交信息计算出的 Changelog,以及镜像和 tar.gz 文件等制品。 +3. 根据 **Github 发布** 编写清晰的发布说明,包括: + - 用户友好的发布亮点。 + - 已弃用和不兼容的更改。 + - 有关如何安装和升级的简要说明。 + +## 门控测试 + +在创建发布分支之前:我们会有一个 **一周** 的代码冻结期。在这期间,我们将避免合并任何功能 PR,只会修复错误。 + +Maintainer 会负责测试并修复那些在临近发布时间发现的紧急问题。 + +## 语义化版本控制 + +`Karpor` 采用 [语义化版本控制](https://semver.org/lang/zh-CN/) 作为版本号。 + +版本格式为:主版本号.次版本号.修订号,例如, `v1.2.3`。版本号 **递增规则** 如下: + +- 主版本号:当你做了不兼容的 API 修改。 +- 次版本号:当你做了向下兼容的功能性新增。 +- 修订号:当你做了向下兼容的问题修正。 + +**先行版本号及版本编译信息** 可以作为加到“主版本号.次版本号.修订号”的后面,作为延伸,比如 `v1.2.3-alpha.0`, `v1.2.3-beta.1`, `v1.2.3-rc.2`, 其中 `-alpha.0`, `-beta.1`, `-rc.2` 是先行版本号。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/2-code-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/2-code-conventions.md new file mode 100644 index 00000000..e30bab57 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/2-code-conventions.md @@ -0,0 +1,79 @@ +--- +title: 代码规约 +--- +在这部分,你将会了解 Karpor 项目中所有类型的代码规约。不必一次把这些规则全部了解,确保你在编写代码前阅读对应的部分就可以了。 + +- [Go 代码规约](#go-%E4%BB%A3%E7%A0%81%E8%A7%84%E7%BA%A6) +- [Bash 或脚本规约](#bash-%E6%88%96%E8%84%9A%E6%9C%AC%E8%A7%84%E7%BA%A6) +- [目录和文件规约](#%E7%9B%AE%E5%BD%95%E5%92%8C%E6%96%87%E4%BB%B6%E8%A7%84%E7%BA%A6) +- [Linting 和格式化](#linting-%E5%92%8C%E6%A0%BC%E5%BC%8F%E5%8C%96) + +## Go 代码规约 + +- [Go 代码评审评论](https://go.dev/wiki/CodeReviewComments) +- [高效的 Go](https://golang.org/doc/effective_go.html) +- 了解并且避免 [Go 地雷](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) +- 为你的代码编写注释. 
+ + - [Go's 注释规约](https://go.dev/blog/godoc) + - 如果代码评阅者询问你代码为什么要这么实现,这可能说明你应当为你的代码编写注释。 +- 命令行标志应该用双连接号 `--`,而不是下划线 +- 接口 + + - 根据 RFC3986,URL 是大小写敏感的。Karpor 使用“短横线命名(`kebab-case`)”作为 URL。 + - 例如:`POST /rest-api/v1/resource-group-rule` +- 命名 + + - 为接口选择名称时请考虑包名称,避免冗余。 + + - 例如: `storage.Interface` 优于 `storage.StorageInterface`。 + - 不要在包名称中使用大写字符、下划线和破折号。 + - 选择包名称时,请考虑父目录名称。 + + - 所有 `pkg/manager/cluster/foo.go` 应该命名为 `package cluster` + 而不是 `package clustermanager`。 + - 除非有充分理由,`package foo` 行应该与 .go 文件所在目录的名称相同。 + - 为了避免歧义,导入包时可以指定别名。 + - 锁应该被称为 `lock`,并且永远不应该被嵌入(总是以 `lock sync.Mutex` 的形式明确声明)。当存在多个锁时,应该遵循 Go 的命名约定给每个锁一个 buts 的名称 - `stateLock`,`mapLock` 等。 + +## Bash 或脚本规约 + +- [Shell 样式指南](https://google.github.io/styleguide/shell.xml) +- 确保构建、发布、测试和集群管理脚本可以在 macOS 上运行 + +## 目录和文件规约 + +- 避免软件包无序扩展。为新的包找到合适的子目录。 + + - 没有更合适放置位置的新包应该放在 `pkg/util` 下的子目录。 +- 避免使用通用包。使用名为 `util` 的包让人疑惑。相反地,应当根据你期望的功能推导出包名 + 例如,处理等待操作的使用功能位于 `wait` 包中,包括类似 Poll 这样的功能,所以完整名称是 `wait.Poll` +- 所有的文件名都应该是小写 +- Go 源码文件名和目录名中使用 `_`,而不是 `-` + + - 包目录名通常应当尽量避免使用分隔符(当包目录名含多个单词时,它们通常应该被放在嵌套的子目录) +- 文档的文件名和目录名中应该使用 `-`,而不是 `_` +- 用于说明系统特性的示例应该位于 `/docs/user-guide` 或 `/docs/admin`, 取决于它是主要面向部署应用的用户还是集群管理员。实际的应用示例应位于 `/example` 中 + + - 示例还应该展示 [配置和使用系统的最佳实践](https://kubernetes.io/docs/concepts/configuration/overview/) +- 第三方代码 + + - 普通的第三方依赖 Go 代码 由 [go modules](https://github.com/golang/go/wiki/Modules) 管理 + - 其他的第三方代码应该放在 `/third_party` 目录下 + + - fork 的第三方 Go 代码放在 `/third_party/forked` 目录下 + - fork 的_golang stdlib_ 代码放在 `/third_party/forked/golang` 目录下 + - 第三方代码必须包含许可证 + - 这也包括修改过的第三方代码和引用 + +## Linting 和格式化 + +为了确保 Go 代码库之间的一致性,我们要求所有代码通过多个 linter 检查。 + +要运行所有的 linter,请使用 `lint` 作为 Makefile 目标: + +```shell +make lint +``` + +该命令将清理代码并进行一些 lint 检查。检查结束后请记得检查所有变更。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/3-test-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/3-test-conventions.md new file mode 100644 index 00000000..892c7f9e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/3-test-conventions.md @@ -0,0 +1,266 @@ +--- +title: 测试规约 +--- +## 测试原则 + +在 Karpor 中,我们主要关注以下三种测试: + +- 单元测试:主要关注 **最小可测试单元**(例如函数,方法,实用类等) +- 集成测试:针对 **多个单元(或模块)**间相互作用和集成的测试 +- 端到端测试(e2e tests): 针对 **整个系统行为** 的测试,通常需要模拟真实用户场景。 + +每种测试都有优势,劣势和适用场景。为了实现更好的开发体验,我们在编写测试时应遵循以下原则。 + +**测试原则**: + +- 单个测试用例应该仅覆盖单个场景 +- 遵循 **7-2-1 原则**,即 70% 的单元测试,20% 的集成测试和 10% 的端到端测试 +- **非必要情况下,避免在单元测试中使用框架**(比如 `golang/mock`)。如果你觉得需要在单元测试中使用 mock 框架,那么你可能应该实现的是集成测试甚至端到端测试。 + +## 技术选择 + +在当前时间点,在 Go 语言生态中最流行的测试框架主要有 [Ginkgo](https://onsi.github.io/ginkgo/)/[Gomega](https://onsi.github.io/gomega/) 和 [Testify](https://github.com/stretchr/testify)。因此,本节主要讨论这两个测试框架的的特点、优缺点以及最终的选择。 + +### Ginkgo/Gomega + +**优点**: + +1. **BDD 支持**:Ginkgo 因为支持行为驱动开发(Behavior-Driven Development,BDD)风格而收到许多开发人员的青睐。它提供了丰富的 DSL 语法,通过 `Describe`、`Context`、`It` 等关键字使测试用例更具描述性和可读性。 +2. **并行执行**:Ginkgo 能够以多进程并行执行测试,提高了测试执行的效率。 +3. **丰富的匹配器**:与 Gomega 匹配器库结合使用时,它提供了丰富的断言能力,使测试更加直观和方便。 +4. **异步支持**:Ginkgo 提供了对处理复杂异步场景的原生支持,降低了死锁和超时的风险。 +5. **测试用例组织**:支持将测试用例组织到测试套件中,便于管理和扩展。 + +**缺点**: + +1. **学习曲线过于陡峭**:对不熟悉 BDD 测试框架的开发者来说,Ginkgo 的 DSL 语法可能需要一些时间熟悉。 +2. **并行测试的复杂性**:尽管 Ginkgo 支持并行测试,但是管理用于并行测试的资源和环境可能会引入额外的复杂性。 + +### Testify + +**优点**: + +1. **简化的接口 API**: Testify 提供了简单明了的 API,容易上手,特别是对于熟悉了 `testing` 包的开发者。 +2. **Mock 支持**: 提供了强大的 Mock 功能,便于模拟依赖和接口进行单元测试。 +3. **表格驱动测试**: 支持表格驱动测试,允许测试同一个函数使用各种不同输入和预期输出,增强了测试用例的可重用性。 +4. 
**与 `testing` 包的兼容性**: Testify 与 Go 标准库的 testing 包高度兼容,可以无缝集成到现有的测试工作流中。 +5. **文档**: Testify 的 [官方文档](https://pkg.go.dev/github.com/stretchr/testify) 也提供了丰富的介绍,如何使用其断言和 Mock 功能。 + +**缺点**: + +1. **缺少 BDD 支持**: Testify 不支持 BDD 风格,对于寻求提高测试用例可读性的开发人员可能直观性较差。 +2. **功能相对简单**: 与 Ginkgo 相比,Testify 的功能相对简单,可能不满足一些复杂测试场景的需求。 + +### 总结 + +简而言之,Ginkgo/Gomega 提供了更好的可读性和可维护性,产生清晰明了的测试代码,但需要熟悉 BDD 风格,学习曲线比较陡峭。Testify 更简单、更实用,学习曲线较为平缓,但随着时间的推移,测试代码风格可能变得更加多样化,降低可维护性。 + +考虑到 Karpor 的实际情况和两种框架的优缺点,我们决定结合使用这两个框架: + +- 使用 Testify 进行单元测试,坚持使用 [表格驱动测试](https://go.dev/wiki/TableDrivenTests) 来约束代码风格,防止退化; +- 利用 Ginkgo 的 BDD 特性编写更高级别的集成和端到端测试; + +这种组合充分发挥了两种框架的优势,提高了测试的整体效率、可读性和质量。 + +## 编写规范 + +### 测试风格 + +[表格驱动测试](https://go.dev/wiki/TableDrivenTests) 是编写测试用例的最佳实践,类似于编程中的设计模式,它也是官方 Go 语言推荐的风格。表格驱动测试使用表格提供各种输入和预期输出,允许同一个测试函数验证不同的场景。这种方法的优点是它增加了测试用例的可重用性,减少了重复代码,并使测试更加清晰易于维护。 + +虽然 Go 的 `testing` 包中没有直接支持表格驱动测试的语法,但可以通过编写辅助函数和使用匿名函数来模拟实现。 + +这是一个在 Go 标准库的 `fmt` 包中实现的表格驱动测试的示例: + +```go +var flagtests = []struct { + in string + out string +}{ + {"%a", "[%a]"}, + {"%-a", "[%-a]"}, + {"%+a", "[%+a]"}, + {"%#a", "[%#a]"}, + {"% a", "[% a]"}, + {"%0a", "[%0a]"}, + {"%1.2a", "[%1.2a]"}, + {"%-1.2a", "[%-1.2a]"}, + {"%+1.2a", "[%+1.2a]"}, + {"%-+1.2a", "[%+-1.2a]"}, + {"%-+1.2abc", "[%+-1.2a]bc"}, + {"%-1.2abc", "[%-1.2a]bc"}, +} +func TestFlagParser(t *testing.T) { + var flagprinter flagPrinter + for _, tt := range flagtests { + t.Run(tt.in, func(t *testing.T) { + s := Sprintf(tt.in, &flagprinter) + if s != tt.out { + t.Errorf("got %q, want %q", s, tt.out) + } + }) + } +} +``` + +值得注意的是,目前大部分主流 IDE 都已经集成了 [gotests](https://github.com/cweill/gotests),可以自动生成表格驱动风格的 Go 单元测试,相信这可以提升大家编写单元测试的效率: + +- [GoLand](https://blog.jetbrains.com/go/2020/03/13/test-driven-development-with-goland/) +- [Visual Studio Code](https://juandes.com/go-test-vsc/) + +### 文件命名 + +- **规范内容**:测试函数必须以 `Test` 开头,后面跟着被测试函数的名称,使用驼峰式命名法。 +- **正面示例**:`xxx_test.go` +- **反面示例**:`testFile.go`、`test_xxx.go` + +### 测试函数命名 + +- **规范内容**:测试函数名必须以 `Test` 开头,后面跟着被测试函数的名称,使用驼峰式命名法。 +- **正面示例**: + ```go + func TestAdd(t *testing.T) { + // 测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestAddWrong(t *testing.T) { + // 测试逻辑 ... + } + ``` + +### 测试函数签名 + +- **规范内容**:测试函数签名必须是 `func TestXxx(t *testing.T)` 形式,其中 `t` 是类型为 `*testing.T` 的对象,并且不应该有其他的参数和返回值。 +- **正面示例**: + ```go + func TestSubtraction(t *testing.T) { + // 测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestSubtraction(value int) { + // 测试逻辑 ... + } + ``` + +### 测试组织 + +- **规范内容**:测试用例应当相互独立,避免测试之间相互影响;使用子测试 `t.Run` 来组织复杂的测试场景。 +- **正面示例**: + ```go + func TestMathOperations(t *testing.T) { + t.Run("Addition", func(t *testing.T) { + // addition 的测试逻辑 ... + }) + t.Run("Subtraction", func(t *testing.T) { + // subtraction 的测试逻辑 ... + }) + } + ``` +- **反面示例**: + ```go + func TestMathOperations(t *testing.T) { + // 混合 addition 和 subtraction 的测试逻辑 ... 
+ } + ``` + +### Test Coverage + +- **规范内容**:需要注意测试覆盖率,使用 `go test -cover` 命令检查代码的测试覆盖率。 +- **正面示例**: + + ```shell + $ go test -cover + ``` +- **反面示例**: + + ```shell + $ go test # 不检查测试覆盖率 + ``` +- **注意**: Karpor 将此命令包装为 `make cover`,它将在命令行中输出每个包的覆盖率和总覆盖率。如果你想在浏览器中查看覆盖率报告,请执行 `make cover-html`。 + +### 性能测试 + +- **规范内容**:性能测试应当以 `Benchmark` 开头,并且接受 `*testing.B` 类型的参数,专注于性能测试。 +- **正面示例**: + ```go + func BenchmarkAdd(b *testing.B) { + for i := 0; i < b.N; i++ { + add(1, 1) + } + } + ``` +- **反面示例**: + ```go + func BenchmarkAddWrong(b *testing.B) { + for i := 0; i < 1000; i++ { + add(1, 1) + } + } + ``` + +### 并发测试 + +- **规范内容**:对于并发代码,应该编写适当的测试用例,以确保并发逻辑的正确性。 +- **正面示例**: + ```go + func TestConcurrentAccess(t *testing.T) { + // 设置并发环境 ... + // 并发访问测试逻辑 ... + } + ``` +- **反面示例**: + ```go + func TestConcurrentAccess(t *testing.T) { + // 仅测试单线程逻辑... + } + ``` + +### 测试帮助函数 + +- **规范内容**:可以在测试文件中定义辅助函数来帮助设置测试环境或清理资源。 +- **正面示例**: + ```go + func setupTest(t *testing.T) { + // 设置测试环境 ... + } + + func tearDownTest(t *testing.T) { + // 清理资源 ... + } + + func TestMyFunction(t *testing.T) { + t.Run("TestSetup", func(t *testing.T) { + setupTest(t) + // 测试逻辑 ... + }) + } + ``` +- **反面示例**: + ```go + // 直接在测试中设置环境和清理资源 + func TestMyFunction(t *testing.T) { + // 设置测试环境 ... + // 测试逻辑 ... + // 清理资源 ... + } + ``` + +### 避免使用全局变量 + +- **规范内容**: 尽量避免在测试中使用全局变量以确保测试独立。 +- **正面示例**: 在测试函数内部声明并使用必要的变量。 +- **反面示例**: 在测试文件的顶部声明全局变量。 + +### 清晰的错误信息 + +- **规范内容**: 当测试失败时,输出清晰易懂的错误信息,帮助开发人员定位问题。 +- **正面示例**: + - `t.Errorf("Expected value %d, but got %d", expected, real)` +- **反面示例**: + - `t.Errorf("Error occurred")` + - `fmt.Println("Error occurred")` + - `panic("Error occurred")` diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/4-commit-conventions.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/4-commit-conventions.md new file mode 100644 index 00000000..979b7631 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/4-commit-conventions.md @@ -0,0 +1,71 @@ +--- +title: Commit 规约 +--- +## Commit 信息结构 + +Karpor 遵循 [约定式提交](https://www.conventionalcommits.org/zh-hans/v1.0.0/)。 + +Commit 信息应当组织为以下结构: + +``` +<类型>[可选 范围]: <描述> + +[可选 正文] +``` + +## 示例 + +带范围的 Commit 信息: + +``` +feat(lang): add polish language +``` + +不带正文的 Commit 信息: + +``` +docs: correct spelling of CHANGELOG +``` + +带多个正文段落的 Commit 信息: + +``` +fix: correct minor typos in code + +see the issue for details + +on typos fixed. 
+ +reviewed-by: Z +refs #133 +``` + +## `<类型>`(必须) + +必须填写的类型有助于更容易确定这次提交的范围,基于 [Angular 指南](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines)。 + +我们在 `<类型>` 使用小写,以避免花费时间在大小写敏感问题上。`<类型>` 可以是以下之一: + +- **feat**:新特性 +- **fix**:漏洞修复 +- **docs**:仅文档改动 +- **build**:关于构建系统和外部依赖的改动 +- **style**:不影响代码含义的改动(如空行、格式、缺少分号等) +- **refactor**:不属于漏洞修复或者增加特性的代码改动 +- **perf**:提升性能的代码改动 +- **test**: 增加缺少的测试用例或者修正现有的测试用例 +- **chore**: 构建过程或辅助工具和库(如文档生成)的修改 + +## `<范围>`(可选) + +范围是可选的,可以包含在括号中为类型提供更多的上下文信息。可以指定这次 commit 的任何内容。Github issue 也是有效的范围,例如 `fix(ui)`、`feat(api)`、`fix(#233)` 等。 + +当改动影响多个范围时,可以使用 `*`。 + +## `<描述>`(必须) + +描述必须紧跟在类型/范围前缀后面的冒号和空格。它是代码更改的简明摘要,例如 `fix: array parsing issue when multiple spaces were contained in string`,而不是 `fix: bug`。 + +## `<正文>`(可选) + +在简短的描述后可以添加较长的正文,提供有关代码更改的更多上下文信息。正文必须位于描述之后一行。 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/_category_.json new file mode 100644 index 00000000..3287fa06 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/2-conventions/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Conventions" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/_category_.json new file mode 100644 index 00000000..8de262b6 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/4-developer-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Guide" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/1-karpor.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/1-karpor.md new file mode 100644 index 00000000..891809d7 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/1-karpor.md @@ -0,0 +1,230 @@ +--- +title: karpor +--- +### Synopsis + +Launch an API server + +``` +karpor [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --api-audiences strings Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. + --audit-log-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-log-batch-max-size int The maximum size of a batch. Only used in batch mode. 
(default 1) + --audit-log-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. + --audit-log-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. + --audit-log-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. + --audit-log-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. + --audit-log-compress If set, the rotated log files will be compressed using gzip. + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "blocking") + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-log-truncate-enabled Whether event and batch truncating is enabled. + --audit-log-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-log-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-log-version string API group and version used for serializing audit events written to log. (default "audit.k8s.io/v1") + --audit-policy-file string Path to the file that defines the audit policy configuration. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-webhook-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. (default true) + --audit-webhook-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. + --audit-webhook-initial-backoff duration The amount of time to wait before retrying the first failed request. 
(default 10s) + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "batch") + --audit-webhook-truncate-enabled Whether event and batch truncating is enabled. + --audit-webhook-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-webhook-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-webhook-version string API group and version used for serializing audit events written to webhook. (default "audit.k8s.io/v1") + --authorization-mode strings Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default [RBAC]) + --authorization-policy-file string File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --authorization-webhook-version string The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook. (default "v1beta1") + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. (default [.*]) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). 
Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. (default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook,ValidatingAdmissionPolicy]) + --egress-selector-config-file string File with apiserver egress selector configuration. + --elastic-search-addresses strings The elastic search address + --elastic-search-password string The elastic search password + --elastic-search-username string The elastic search username + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-priority-and-fairness If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness (default true) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --encryption-provider-config-automatic-reload Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints. + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-count-metric-poll-period duration Frequency of polling etcd for number of resources per type. 0 disables the metric collection. (default 1m0s) + --etcd-db-metric-poll-interval duration The interval of requests to poll etcd and update metric. 0 disables the metric collection (default 30s) + --etcd-healthcheck-timeout duration The timeout to use when checking etcd health. (default 2s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry/karpor") + --etcd-readycheck-timeout duration The timeout to use when checking etcd readiness (default 2s) + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary. + --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. 
Options are: + APIListChunking=true|false (BETA - default=true) + APIPriorityAndFairness=true|false (BETA - default=true) + APIResponseCompression=true|false (BETA - default=true) + APISelfSubjectReview=true|false (ALPHA - default=false) + APIServerIdentity=true|false (BETA - default=true) + APIServerTracing=true|false (ALPHA - default=false) + AggregatedDiscoveryEndpoint=true|false (ALPHA - default=false) + AllAlpha=true|false (ALPHA - default=false) + AllBeta=true|false (BETA - default=false) + AnyVolumeDataSource=true|false (BETA - default=true) + AppArmor=true|false (BETA - default=true) + CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + CPUManagerPolicyBetaOptions=true|false (BETA - default=true) + CPUManagerPolicyOptions=true|false (BETA - default=true) + CSIMigrationPortworx=true|false (BETA - default=false) + CSIMigrationRBD=true|false (ALPHA - default=false) + CSINodeExpandSecret=true|false (ALPHA - default=false) + CSIVolumeHealth=true|false (ALPHA - default=false) + ComponentSLIs=true|false (ALPHA - default=false) + ContainerCheckpoint=true|false (ALPHA - default=false) + CronJobTimeZone=true|false (BETA - default=true) + CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false) + CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false) + CustomResourceValidationExpressions=true|false (BETA - default=true) + DisableCloudProviders=true|false (ALPHA - default=false) + DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false) + DownwardAPIHugePages=true|false (BETA - default=true) + DynamicResourceAllocation=true|false (ALPHA - default=false) + EventedPLEG=true|false (ALPHA - default=false) + ExpandedDNSConfig=true|false (BETA - default=true) + ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) + GRPCContainerProbe=true|false (BETA - default=true) + GracefulNodeShutdown=true|false (BETA - default=true) + GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true) + HPAContainerMetrics=true|false (ALPHA - default=false) + HPAScaleToZero=true|false (ALPHA - default=false) + HonorPVReclaimPolicy=true|false (ALPHA - default=false) + IPTablesOwnershipCleanup=true|false (ALPHA - default=false) + InTreePluginAWSUnregister=true|false (ALPHA - default=false) + InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false) + InTreePluginAzureFileUnregister=true|false (ALPHA - default=false) + InTreePluginGCEUnregister=true|false (ALPHA - default=false) + InTreePluginOpenStackUnregister=true|false (ALPHA - default=false) + InTreePluginPortworxUnregister=true|false (ALPHA - default=false) + InTreePluginRBDUnregister=true|false (ALPHA - default=false) + InTreePluginvSphereUnregister=true|false (ALPHA - default=false) + JobMutableNodeSchedulingDirectives=true|false (BETA - default=true) + JobPodFailurePolicy=true|false (BETA - default=true) + JobReadyPods=true|false (BETA - default=true) + KMSv2=true|false (ALPHA - default=false) + KubeletInUserNamespace=true|false (ALPHA - default=false) + KubeletPodResources=true|false (BETA - default=true) + KubeletPodResourcesGetAllocatable=true|false (BETA - default=true) + KubeletTracing=true|false (ALPHA - default=false) + LegacyServiceAccountTokenTracking=true|false (ALPHA - default=false) + LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) + LogarithmicScaleDown=true|false (BETA - default=true) + MatchLabelKeysInPodTopologySpread=true|false (ALPHA - default=false) + MaxUnavailableStatefulSet=true|false (ALPHA - default=false) + 
MemoryManager=true|false (BETA - default=true) + MemoryQoS=true|false (ALPHA - default=false) + MinDomainsInPodTopologySpread=true|false (BETA - default=false) + MinimizeIPTablesRestore=true|false (ALPHA - default=false) + MultiCIDRRangeAllocator=true|false (ALPHA - default=false) + NetworkPolicyStatus=true|false (ALPHA - default=false) + NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true) + NodeOutOfServiceVolumeDetach=true|false (BETA - default=true) + NodeSwap=true|false (ALPHA - default=false) + OpenAPIEnums=true|false (BETA - default=true) + OpenAPIV3=true|false (BETA - default=true) + PDBUnhealthyPodEvictionPolicy=true|false (ALPHA - default=false) + PodAndContainerStatsFromCRI=true|false (ALPHA - default=false) + PodDeletionCost=true|false (BETA - default=true) + PodDisruptionConditions=true|false (BETA - default=true) + PodHasNetworkCondition=true|false (ALPHA - default=false) + PodSchedulingReadiness=true|false (ALPHA - default=false) + ProbeTerminationGracePeriod=true|false (BETA - default=true) + ProcMountType=true|false (ALPHA - default=false) + ProxyTerminatingEndpoints=true|false (BETA - default=true) + QOSReserved=true|false (ALPHA - default=false) + ReadWriteOncePod=true|false (ALPHA - default=false) + RecoverVolumeExpansionFailure=true|false (ALPHA - default=false) + RemainingItemCount=true|false (BETA - default=true) + RetroactiveDefaultStorageClass=true|false (BETA - default=true) + RotateKubeletServerCertificate=true|false (BETA - default=true) + SELinuxMountReadWriteOncePod=true|false (ALPHA - default=false) + SeccompDefault=true|false (BETA - default=true) + ServerSideFieldValidation=true|false (BETA - default=true) + SizeMemoryBackedVolumes=true|false (BETA - default=true) + StatefulSetAutoDeletePVC=true|false (ALPHA - default=false) + StatefulSetStartOrdinal=true|false (ALPHA - default=false) + StorageVersionAPI=true|false (ALPHA - default=false) + StorageVersionHash=true|false (BETA - default=true) + TopologyAwareHints=true|false (BETA - default=true) + TopologyManager=true|false (BETA - default=true) + TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + TopologyManagerPolicyBetaOptions=true|false (BETA - default=false) + TopologyManagerPolicyOptions=true|false (ALPHA - default=false) + UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false) + ValidatingAdmissionPolicy=true|false (ALPHA - default=false) + VolumeCapacityPriority=true|false (ALPHA - default=false) + WinDSR=true|false (ALPHA - default=false) + WinOverlay=true|false (BETA - default=true) + WindowsHostNetwork=true|false (ALPHA - default=true) (default APIPriorityAndFairness=true) + --goaway-chance float To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. + -h, --help help for karpor + --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default. (default 1000) + --lease-reuse-duration-seconds int The time in seconds that each lease is reused. 
A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer. (default 60) + --livez-grace-period duration This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true. + --max-mutating-requests-inflight int This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely. (default 200) + --max-requests-inflight int This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false] + --permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false] + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --read-only-mode turn on the read only mode + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests. + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. + --search-storage-type string The search storage type + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 443) + --shutdown-delay-duration duration Time to delay the termination. 
During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server. + --shutdown-send-retry-after If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle. + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default). + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. Supported media types: [application/json, application/yaml, application/vnd.kubernetes.protobuf] (default "application/json") + --strict-transport-security-directives strings List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload' + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. (default "apiserver.local.config/certificates/apiserver.crt") + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. + Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. + Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. + --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13 + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. (default "apiserver.local.config/certificates/apiserver.key") + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. 
If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --tracing-config-file string File with apiserver tracing configuration. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. This option is only meaningful for resources built into the apiserver, not ones defined by CRDs or aggregated from external servers, and is only consulted if the watch-cache is enabled. The only meaningful size setting to supply here is zero, which means to disable watch caching for the associated resource; all non-zero values are equivalent and mean to not disable watch caching for that resource +``` + +### SEE ALSO + +* [karpor syncer](2-karpor-syncer.md) - start a resource syncer to sync resource from clusters + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/2-karpor-syncer.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/2-karpor-syncer.md new file mode 100644 index 00000000..d25245ae --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/2-karpor-syncer.md @@ -0,0 +1,25 @@ +--- +title: karpor syncer +--- +## karpor syncer + +start a resource syncer to sync resource from clusters + +``` +karpor syncer [flags] +``` + +### Options + +``` + --elastic-search-addresses strings The elastic search address. + --health-probe-bind-address string The address the probe endpoint binds to. (default ":8081") + -h, --help help for syncer + --metrics-bind-address string The address the metric endpoint binds to. 
(default ":8080") +``` + +### SEE ALSO + +* [karpor](1-karpor.md) - Launch an API server + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/_category_.json new file mode 100644 index 00000000..41757f5f --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/1-cli-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "CLI Commands" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/2-openapi.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/2-openapi.md new file mode 100644 index 00000000..81c0321d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/2-openapi.md @@ -0,0 +1,1862 @@ +--- +title: OpenAPI +--- +## Informations + +### Version + +1.0 + +### Contact + +## Content negotiation + +### URI Schemes + +* http + +### Consumes + +* application/json +* multipart/form-data +* text/plain + +### Produces + +* application/json +* text/plain + +## All endpoints + +### cluster + +| Method | URI | Name | Summary | +| ------ | ------------------------------------ | ------------------------------------------------------------------------------------- | -------------------------------------------- | +| DELETE | /rest-api/v1/cluster/{clusterName} | [delete rest API v1 cluster cluster name](#delete-rest-api-v1-cluster-cluster-name) | Delete removes a cluster resource by name. | +| GET | /rest-api/v1/cluster/{clusterName} | [get rest API v1 cluster cluster name](#get-rest-api-v1-cluster-cluster-name) | Get returns a cluster resource by name. | +| GET | /rest-api/v1/clusters | [get rest API v1 clusters](#get-rest-api-v1-clusters) | List lists all cluster resources. | +| POST | /rest-api/v1/cluster/{clusterName} | [post rest API v1 cluster cluster name](#post-rest-api-v1-cluster-cluster-name) | Create creates a cluster resource. | +| POST | /rest-api/v1/cluster/config/file | [post rest API v1 cluster config file](#post-rest-api-v1-cluster-config-file) | Upload kubeConfig file for cluster | +| POST | /rest-api/v1/cluster/config/validate | [post rest API v1 cluster config validate](#post-rest-api-v1-cluster-config-validate) | Validate KubeConfig | +| PUT | /rest-api/v1/cluster/{clusterName} | [put rest API v1 cluster cluster name](#put-rest-api-v1-cluster-cluster-name) | Update updates the cluster metadata by name. | + +### debug + +| Method | URI | Name | Summary | +| ------ | ---------- | ------------------------------- | ---------------------------- | +| GET | /endpoints | [get endpoints](#get-endpoints) | List all available endpoints | + +### insight + +| Method | URI | Name | Summary | +| ------ | ----------------------------- | --------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/insight/audit | [get rest API v1 insight audit](#get-rest-api-v1-insight-audit) | Audit based on resource group. | +| GET | /rest-api/v1/insight/detail | [get rest API v1 insight detail](#get-rest-api-v1-insight-detail) | GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. 
| +| GET | /rest-api/v1/insight/events | [get rest API v1 insight events](#get-rest-api-v1-insight-events) | GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/score | [get rest API v1 insight score](#get-rest-api-v1-insight-score) | ScoreHandler calculates a score for the audited manifest. | +| GET | /rest-api/v1/insight/stats | [get rest API v1 insight stats](#get-rest-api-v1-insight-stats) | Get returns a global statistics info. | +| GET | /rest-api/v1/insight/summary | [get rest API v1 insight summary](#get-rest-api-v1-insight-summary) | Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/topology | [get rest API v1 insight topology](#get-rest-api-v1-insight-topology) | GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | + +### resourcegroup + +| Method | URI | Name | Summary | +| ------ | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | +| GET | /rest-api/v1/resource-groups/{resourceGroupRuleName} | [get rest API v1 resource groups resource group rule name](#get-rest-api-v1-resource-groups-resource-group-rule-name) | List lists all ResourceGroups by rule name. | + +### resourcegrouprule + +| Method | URI | Name | Summary | +| ------ | -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | +| DELETE | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [delete rest API v1 resource group rule resource group rule name](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name) | Delete removes a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [get rest API v1 resource group rule resource group rule name](#get-rest-api-v1-resource-group-rule-resource-group-rule-name) | Get returns a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rules | [get rest API v1 resource group rules](#get-rest-api-v1-resource-group-rules) | List lists all ResourceGroupRules. | +| POST | /rest-api/v1/resource-group-rule | [post rest API v1 resource group rule](#post-rest-api-v1-resource-group-rule) | Create creates a ResourceGroupRule. | +| PUT | /rest-api/v1/resource-group-rule | [put rest API v1 resource group rule](#put-rest-api-v1-resource-group-rule) | Update updates the ResourceGroupRule metadata by name. | + +### search + +| Method | URI | Name | Summary | +| ------ | ------------------- | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/search | [get rest API v1 search](#get-rest-api-v1-search) | SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. | + +## Paths + +### Delete removes a cluster resource by name. (*DeleteRestAPIV1ClusterClusterName*) + +``` +DELETE /rest-api/v1/cluster/{clusterName} +``` + +This endpoint deletes the cluster resource by name. 
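+
+As a rough illustration (not part of the generated spec), the request might look like the following `curl` call; `$KARPOR_HOST` is a placeholder for the address of your Karpor API server and `example-cluster` is the cluster to remove:
+
+```shell
+# Hypothetical invocation; substitute your own server address and cluster name.
+curl -X DELETE "$KARPOR_HOST/rest-api/v1/cluster/example-cluster"
+```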
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------- | +| [200](#delete-rest-api-v1-cluster-cluster-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#delete-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#delete-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#delete-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#delete-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#delete-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#delete-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Delete removes a ResourceGroupRule by name. (*DeleteRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +DELETE /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint deletes the ResourceGroupRule by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------------------------------- | +| [200](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List all available endpoints (*GetEndpoints*) + +``` +GET /endpoints +``` + +List all registered endpoints in the router + +#### Consumes + +* text/plain + +#### Produces + +* text/plain + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------- | ------ | ----------------------------- | :---------: | ----------------------------------- | +| [200](#get-endpoints-200) | OK | Endpoints listed successfully | | [schema](#get-endpoints-200-schema) | + +#### Responses + +##### 200 - Endpoints listed successfully + +Status: OK + +###### Schema + +### Get returns a cluster resource by name. (*GetRestAPIV1ClusterClusterName*) + +``` +GET /rest-api/v1/cluster/{clusterName} +``` + +This endpoint returns a cluster resource by name. 
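+
+A minimal `curl` sketch (again assuming `$KARPOR_HOST` stands in for your Karpor API server address); the optional `format` query parameter selects `json` or `yaml` output:
+
+```shell
+# Hypothetical invocation; substitute your own server address and cluster name.
+curl "$KARPOR_HOST/rest-api/v1/cluster/example-cluster?format=yaml"
+```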
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------- | ------ | -------- | --------- | :------: | ------- | -------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#get-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#get-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#get-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#get-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#get-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#get-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all cluster resources. (*GetRestAPIV1Clusters*) + +``` +GET /rest-api/v1/clusters +``` + +This endpoint lists all cluster resources. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the cluster. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------ | --------------------- | ----------------------- | :---------: | ---------------------------------------------- | +| [200](#get-rest-api-v1-clusters-200) | OK | List of cluster objects | | [schema](#get-rest-api-v1-clusters-200-schema) | +| [400](#get-rest-api-v1-clusters-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-clusters-400-schema) | +| [401](#get-rest-api-v1-clusters-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-clusters-401-schema) | +| [404](#get-rest-api-v1-clusters-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-clusters-404-schema) | +| [405](#get-rest-api-v1-clusters-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-clusters-405-schema) | +| [429](#get-rest-api-v1-clusters-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-clusters-429-schema) | +| [500](#get-rest-api-v1-clusters-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-clusters-500-schema) | + +#### Responses + +##### 200 - List of cluster objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Audit based on resource group. (*GetRestAPIV1InsightAudit*) + +``` +GET /rest-api/v1/insight/audit +``` + +This endpoint audits based on the specified resource group. 
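+
+For illustration, an audit of a single Deployment could be requested as below; `$KARPOR_HOST` is a placeholder and the query values mirror the examples given in the parameter table that follows:
+
+```shell
+# Hypothetical invocation; note that the apiVersion value is percent-encoded (apps/v1 -> apps%2Fv1).
+curl "$KARPOR_HOST/rest-api/v1/insight/audit?cluster=example-cluster&apiVersion=apps%2Fv1&kind=Deployment&namespace=default&name=foo"
+```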
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced scanning, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-audit-200) | OK | Audit results | | [schema](#get-rest-api-v1-insight-audit-200-schema) | +| [400](#get-rest-api-v1-insight-audit-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-audit-400-schema) | +| [401](#get-rest-api-v1-insight-audit-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-audit-401-schema) | +| [404](#get-rest-api-v1-insight-audit-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-audit-404-schema) | +| [429](#get-rest-api-v1-insight-audit-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-audit-429-schema) | +| [500](#get-rest-api-v1-insight-audit-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-audit-500-schema) | + +#### Responses + +##### 200 - Audit results + +Status: OK + +###### Schema + +[ScannerAuditData](#scanner-audit-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightDetail*) + +``` +GET /rest-api/v1/insight/detail +``` + +This endpoint returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml. 
Default to json | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-detail-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-insight-detail-200-schema) | +| [400](#get-rest-api-v1-insight-detail-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-detail-400-schema) | +| [401](#get-rest-api-v1-insight-detail-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-detail-401-schema) | +| [404](#get-rest-api-v1-insight-detail-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-detail-404-schema) | +| [405](#get-rest-api-v1-insight-detail-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-detail-405-schema) | +| [429](#get-rest-api-v1-insight-detail-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-detail-429-schema) | +| [500](#get-rest-api-v1-insight-detail-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-detail-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightEvents*) + +``` +GET /rest-api/v1/insight/events +``` + +This endpoint returns events for a Kubernetes resource YAML by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-events-200) | OK | List of events | | [schema](#get-rest-api-v1-insight-events-200-schema) | +| [400](#get-rest-api-v1-insight-events-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-events-400-schema) | +| [401](#get-rest-api-v1-insight-events-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-events-401-schema) | +| [404](#get-rest-api-v1-insight-events-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-events-404-schema) | +| [405](#get-rest-api-v1-insight-events-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-events-405-schema) | +| [429](#get-rest-api-v1-insight-events-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-events-429-schema) | +| [500](#get-rest-api-v1-insight-events-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-events-500-schema) | + +#### Responses + +##### 200 - List of events + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### ScoreHandler calculates a score for the audited manifest. (*GetRestAPIV1InsightScore*) + +``` +GET /rest-api/v1/insight/score +``` + +This endpoint calculates a score for the provided manifest based on the number and severity of issues detected during the audit. 
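+
+A hypothetical `curl` call for scoring the same Deployment used in the examples above (`$KARPOR_HOST` remains a placeholder for your server address):
+
+```shell
+# Hypothetical invocation; the query parameters match the table below.
+curl "$KARPOR_HOST/rest-api/v1/insight/score?cluster=example-cluster&apiVersion=apps%2Fv1&kind=Deployment&namespace=default&name=foo"
+```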
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced compute score, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ------------------------ | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-score-200) | OK | Score calculation result | | [schema](#get-rest-api-v1-insight-score-200-schema) | +| [400](#get-rest-api-v1-insight-score-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-score-400-schema) | +| [401](#get-rest-api-v1-insight-score-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-score-401-schema) | +| [404](#get-rest-api-v1-insight-score-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-score-404-schema) | +| [429](#get-rest-api-v1-insight-score-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-score-429-schema) | +| [500](#get-rest-api-v1-insight-score-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-score-500-schema) | + +#### Responses + +##### 200 - Score calculation result + +Status: OK + +###### Schema + +[InsightScoreData](#insight-score-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a global statistics info. (*GetRestAPIV1InsightStats*) + +``` +GET /rest-api/v1/insight/stats +``` + +This endpoint returns a global statistics info. 
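+
+Since this endpoint takes no parameters, a sketch of the call is simply (with `$KARPOR_HOST` as a placeholder for your server address):
+
+```shell
+curl "$KARPOR_HOST/rest-api/v1/insight/stats"
+```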
+ +#### Produces + +* application/json + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ---------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-stats-200) | OK | Global statistics info | | [schema](#get-rest-api-v1-insight-stats-200-schema) | +| [400](#get-rest-api-v1-insight-stats-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-stats-400-schema) | +| [401](#get-rest-api-v1-insight-stats-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-stats-401-schema) | +| [404](#get-rest-api-v1-insight-stats-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-stats-404-schema) | +| [405](#get-rest-api-v1-insight-stats-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-stats-405-schema) | +| [429](#get-rest-api-v1-insight-stats-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-stats-429-schema) | +| [500](#get-rest-api-v1-insight-stats-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-stats-500-schema) | + +#### Responses + +##### 200 - Global statistics info + +Status: OK + +###### Schema + +[InsightStatistics](#insight-statistics) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightSummary*) + +``` +GET /rest-api/v1/insight/summary +``` + +This endpoint returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------- | +| [200](#get-rest-api-v1-insight-summary-200) | OK | Resource Summary | | [schema](#get-rest-api-v1-insight-summary-200-schema) | +| [400](#get-rest-api-v1-insight-summary-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-summary-400-schema) | +| [401](#get-rest-api-v1-insight-summary-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-summary-401-schema) | +| [404](#get-rest-api-v1-insight-summary-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-summary-404-schema) | +| [405](#get-rest-api-v1-insight-summary-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-summary-405-schema) | +| [429](#get-rest-api-v1-insight-summary-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-summary-429-schema) | +| [500](#get-rest-api-v1-insight-summary-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-summary-500-schema) | + +#### Responses + +##### 200 - Resource Summary + +Status: OK + +###### Schema + +[InsightResourceSummary](#insight-resource-summary) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightTopology*) + +``` +GET /rest-api/v1/insight/topology +``` + +This endpoint returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Force re-generating the topology, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------- | --------------------- | -------------------------------------------- | :---------: | ------------------------------------------------------ | +| [200](#get-rest-api-v1-insight-topology-200) | OK | map from string to resource.ResourceTopology | | [schema](#get-rest-api-v1-insight-topology-200-schema) | +| [400](#get-rest-api-v1-insight-topology-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-topology-400-schema) | +| [401](#get-rest-api-v1-insight-topology-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-topology-401-schema) | +| [404](#get-rest-api-v1-insight-topology-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-topology-404-schema) | +| [405](#get-rest-api-v1-insight-topology-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-topology-405-schema) | +| [429](#get-rest-api-v1-insight-topology-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-topology-429-schema) | +| [500](#get-rest-api-v1-insight-topology-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-topology-500-schema) | + +#### Responses + +##### 200 - map from string to resource.ResourceTopology + +Status: OK + +###### Schema + +map of [InsightResourceTopology](#insight-resource-topology) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a ResourceGroupRule by name. (*GetRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint returns a ResourceGroupRule by name. 
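+
+For illustration, fetching a rule by its path parameter might look like this (the rule name `namespace-rule` and the host `${KARPOR_HOST}` are placeholders, not values defined by this API):
+
+```shell
+# Retrieve a single ResourceGroupRule by name.
+curl "http://${KARPOR_HOST}/rest-api/v1/resource-group-rule/namespace-rule"
+```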
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroupRules. (*GetRestAPIV1ResourceGroupRules*) + +``` +GET /rest-api/v1/resource-group-rules +``` + +This endpoint lists all ResourceGroupRules. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the resourceGroupRule. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rules-200) | OK | List of resourceGroupRule objects | | [schema](#get-rest-api-v1-resource-group-rules-200-schema) | +| [400](#get-rest-api-v1-resource-group-rules-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rules-400-schema) | +| [401](#get-rest-api-v1-resource-group-rules-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rules-401-schema) | +| [404](#get-rest-api-v1-resource-group-rules-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rules-404-schema) | +| [405](#get-rest-api-v1-resource-group-rules-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rules-405-schema) | +| [429](#get-rest-api-v1-resource-group-rules-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rules-429-schema) | +| [500](#get-rest-api-v1-resource-group-rules-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rules-500-schema) | + +#### Responses + +##### 200 - List of resourceGroupRule objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroups by rule name. (*GetRestAPIV1ResourceGroupsResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-groups/{resourceGroupRuleName} +``` + +This endpoint lists all ResourceGroups. 
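+
+A hedged example of listing the groups produced by one rule, again with a placeholder host and rule name:
+
+```shell
+# List every ResourceGroup generated by the rule 'namespace-rule'.
+curl "http://${KARPOR_HOST}/rest-api/v1/resource-groups/namespace-rule"
+```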
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------------------------------- | --------------------- | ----------------------------- | :---------: | ------------------------------------------------------------------------------ | +| [200](#get-rest-api-v1-resource-groups-resource-group-rule-name-200) | OK | List of resourceGroup objects | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-groups-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-groups-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-groups-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-groups-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-groups-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-groups-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - List of resourceGroup objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. (*GetRestAPIV1Search*) + +``` +GET /rest-api/v1/search +``` + +This endpoint returns an array of Kubernetes runtime Object matched using the query from context. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| -------- | ------- | ------ | -------- | --------- | :------: | ------- | ------------------------------------------------------ | +| page | `query` | string | `string` | | | | The current page to fetch. Default to 1 | +| pageSize | `query` | string | `string` | | | | The size of the page. Default to 10 | +| pattern | `query` | string | `string` | | ✓ | | The search pattern. Can be either sql or dsl. Required | +| query | `query` | string | `string` | | ✓ | | The query to use for search. 
Required | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------- | --------------------- | ----------------------- | :---------: | -------------------------------------------- | +| [200](#get-rest-api-v1-search-200) | OK | Array of runtime.Object | | [schema](#get-rest-api-v1-search-200-schema) | +| [400](#get-rest-api-v1-search-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-search-400-schema) | +| [401](#get-rest-api-v1-search-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-search-401-schema) | +| [404](#get-rest-api-v1-search-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-search-404-schema) | +| [405](#get-rest-api-v1-search-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-search-405-schema) | +| [429](#get-rest-api-v1-search-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-search-429-schema) | +| [500](#get-rest-api-v1-search-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-search-500-schema) | + +#### Responses + +##### 200 - Array of runtime.Object + +Status: OK + +###### Schema + +[][interface{}](#interface) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a cluster resource. (*PostRestAPIV1ClusterClusterName*) + +``` +POST /rest-api/v1/cluster/{clusterName} +``` + +This endpoint creates a new cluster resource using the payload. 
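+
+The JSON body follows the `cluster.ClusterPayload` model documented below; a sketch of registering a cluster (placeholder host, cluster name and kubeconfig content) could be:
+
+```shell
+# Register a cluster named 'demo-cluster'; the kubeconfig value is shortened here for readability.
+curl -X POST "http://${KARPOR_HOST}/rest-api/v1/cluster/demo-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "displayName": "Demo Cluster",
+        "description": "Cluster used for the quick start",
+        "kubeconfig": "<kubeconfig file content>"
+      }'
+```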
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#post-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#post-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#post-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#post-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#post-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#post-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Upload kubeConfig file for cluster (*PostRestAPIV1ClusterConfigFile*) + +``` +POST /rest-api/v1/cluster/config/file +``` + +Uploads a KubeConfig file for cluster, with a maximum size of 2MB. 
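+
+Because this endpoint consumes `multipart/form-data`, curl's `-F` flags map directly onto the documented form fields; the file path and names below are placeholders:
+
+```shell
+# Upload a kubeconfig file (must stay under the 2MB limit) together with the cluster metadata.
+curl -X POST "http://${KARPOR_HOST}/rest-api/v1/cluster/config/file" \
+  -F "name=demo-cluster" \
+  -F "displayName=Demo Cluster" \
+  -F "description=Cluster used for the quick start" \
+  -F "file=@$HOME/.kube/config"
+```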
+ +#### Consumes + +* multipart/form-data + +#### Produces + +* text/plain + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ---------- | ------ | --------------- | --------- | :------: | ------- | ---------------------------------- | +| description | `formData` | string | `string` | | ✓ | | cluster description | +| displayName | `formData` | string | `string` | | ✓ | | cluster display name | +| file | `formData` | file | `io.ReadCloser` | | ✓ | | Upload file with field name 'file' | +| name | `formData` | string | `string` | | ✓ | | cluster name | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-file-200) | OK | Returns the content of the uploaded KubeConfig file. | | [schema](#post-rest-api-v1-cluster-config-file-200-schema) | +| [400](#post-rest-api-v1-cluster-config-file-400) | Bad Request | The uploaded file is too large or the request is invalid. | | [schema](#post-rest-api-v1-cluster-config-file-400-schema) | +| [500](#post-rest-api-v1-cluster-config-file-500) | Internal Server Error | Internal server error. | | [schema](#post-rest-api-v1-cluster-config-file-500-schema) | + +#### Responses + +##### 200 - Returns the content of the uploaded KubeConfig file. + +Status: OK + +###### Schema + +[ClusterUploadData](#cluster-upload-data) + +##### 400 - The uploaded file is too large or the request is invalid. + +Status: Bad Request + +###### Schema + +##### 500 - Internal server error. + +Status: Internal Server Error + +###### Schema + +### Validate KubeConfig (*PostRestAPIV1ClusterConfigValidate*) + +``` +POST /rest-api/v1/cluster/config/validate +``` + +Validates the provided KubeConfig using cluster manager methods. 
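+
+A minimal validation sketch, assuming the kubeconfig is passed inline as a JSON string (host placeholder as above):
+
+```shell
+# Ask Karpor to verify that the supplied kubeconfig can reach its cluster.
+curl -X POST "http://${KARPOR_HOST}/rest-api/v1/cluster/config/validate" \
+  -H "Content-Type: application/json" \
+  -d '{"kubeConfig": "<kubeconfig file content>"}'
+```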
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | --------------------------------------------------- | ------------------------------- | --------- | :------: | ------- | ------------------------------ | +| request | `body` | [ClusterValidatePayload](#cluster-validate-payload) | `models.ClusterValidatePayload` | | ✓ | | KubeConfig payload to validate | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------------------------- | --------------------- | ---------------------------------- | :---------: | -------------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-validate-200) | OK | Verification passed server version | | [schema](#post-rest-api-v1-cluster-config-validate-200-schema) | +| [400](#post-rest-api-v1-cluster-config-validate-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-config-validate-400-schema) | +| [401](#post-rest-api-v1-cluster-config-validate-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-config-validate-401-schema) | +| [404](#post-rest-api-v1-cluster-config-validate-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-config-validate-404-schema) | +| [429](#post-rest-api-v1-cluster-config-validate-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-config-validate-429-schema) | +| [500](#post-rest-api-v1-cluster-config-validate-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-config-validate-500-schema) | + +#### Responses + +##### 200 - Verification passed server version + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a ResourceGroupRule. (*PostRestAPIV1ResourceGroupRule*) + +``` +POST /rest-api/v1/resource-group-rule +``` + +This endpoint creates a new ResourceGroupRule using the payload. 
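+
+The body follows the `resourcegrouprule.ResourceGroupRulePayload` model documented below; the rule name and field list here are illustrative only:
+
+```shell
+# Create a rule that groups resources by namespace and cluster.
+curl -X POST "http://${KARPOR_HOST}/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "name": "namespace-rule",
+        "description": "Group resources by namespace and cluster",
+        "fields": ["namespace", "cluster"]
+      }'
+```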
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-resource-group-rule-200-schema) | +| [400](#post-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-resource-group-rule-400-schema) | +| [401](#post-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-resource-group-rule-401-schema) | +| [404](#post-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-resource-group-rule-404-schema) | +| [405](#post-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-resource-group-rule-405-schema) | +| [429](#post-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-resource-group-rule-429-schema) | +| [500](#post-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the cluster metadata by name. (*PutRestAPIV1ClusterClusterName*) + +``` +PUT /rest-api/v1/cluster/{clusterName} +``` + +This endpoint updates the display name and description of an existing cluster resource. 
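+
+Updating cluster metadata reuses the same payload model as creation; a sketch with placeholder values:
+
+```shell
+# Change only the display name and description of an existing cluster.
+curl -X PUT "http://${KARPOR_HOST}/rest-api/v1/cluster/demo-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{"displayName": "Demo Cluster (renamed)", "description": "Updated description"}'
+```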
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#put-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#put-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#put-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#put-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#put-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#put-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#put-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the ResourceGroupRule metadata by name. (*PutRestAPIV1ResourceGroupRule*) + +``` +PUT /rest-api/v1/resource-group-rule +``` + +This endpoint updates the display name and description of an existing ResourceGroupRule. 
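+
+As with cluster updates, only the metadata of an existing rule is changed; all values below are placeholders:
+
+```shell
+# Update the description of the 'namespace-rule' ResourceGroupRule.
+curl -X PUT "http://${KARPOR_HOST}/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d '{"name": "namespace-rule", "description": "Updated description"}'
+```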
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------------- | +| [200](#put-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-resource-group-rule-200-schema) | +| [400](#put-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-resource-group-rule-400-schema) | +| [401](#put-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-resource-group-rule-401-schema) | +| [404](#put-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-resource-group-rule-404-schema) | +| [405](#put-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-resource-group-rule-405-schema) | +| [429](#put-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-resource-group-rule-429-schema) | +| [500](#put-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +## Models + +### cluster.ClusterPayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------ | -------- | :------: | ------- | --------------------------------------------------------------- | ------- | +| description | string | `string` | | | ClusterDescription is the description of cluster to be created | | +| displayName | string | `string` | | | ClusterDisplayName is the display name of cluster to be created | | +| kubeconfig | string | `string` | | | ClusterKubeConfig is the kubeconfig of cluster to be created | | + +### cluster.UploadData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------------- | ------- | -------- | :------: | ------- | ----------- | ------- | +| content | string | `string` | | | | | +| fileName | string | `string` | | | | | +| fileSize | integer | `int64` | | | | | +| sanitizedClusterContent | 
string | `string` | | | | | + +### cluster.ValidatePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------- | ------ | -------- | :------: | ------- | ----------- | ------- | +| kubeConfig | string | `string` | | | | | + +### entity.ResourceGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------------- | ------------------- | :------: | ------- | ----------- | ------- | +| annotations | map of string | `map[string]string` | | | | | +| apiVersion | string | `string` | | | | | +| cluster | string | `string` | | | | | +| kind | string | `string` | | | | | +| labels | map of string | `map[string]string` | | | | | +| name | string | `string` | | | | | +| namespace | string | `string` | | | | | + +### insight.ResourceSummary + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| creationTimestamp | string | `string` | | | | | +| resource | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | +| resourceVersion | string | `string` | | | | | +| uid | string | `string` | | | | | + +### insight.ResourceTopology + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| children | []string | `[]string` | | | | | +| parents | []string | `[]string` | | | | | +| resourceGroup | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | + +### insight.ScoreData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------------------------------------------------------------------- | -------------- | ------------------ | :------: | ------- | ----------------------------------------------------------------------- | ------- | +| issuesTotal | integer | `int64` | | | IssuesTotal is the total count of all issues found during the audit. | | +| This count can be used to understand the overall number of problems | | | | | | | +| that need to be addressed. | | | | | | | +| resourceTotal | integer | `int64` | | | ResourceTotal is the count of unique resources audited during the scan. | | +| score | number | `float64` | | | Score represents the calculated score of the audited manifest based on | | +| the number and severity of issues. It provides a quantitative measure | | | | | | | +| of the security posture of the resources in the manifest. | | | | | | | +| severityStatistic | map of integer | `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective | | +| number of occurrences. It allows for a quick overview of the distribution | | | | | | | +| of issues across different severity categories. 
| | | | | | | + +### insight.Statistics + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------- | ------- | :------: | ------- | ----------- | ------- | +| clusterCount | integer | `int64` | | | | | +| resourceCount | integer | `int64` | | | | | +| resourceGroupRuleCount | integer | `int64` | | | | | + +### resourcegrouprule.ResourceGroupRulePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | -------- | ---------- | :------: | ------- | ----------- | ------- | +| description | string | `string` | | | | | +| fields | []string | `[]string` | | | | | +| name | string | `string` | | | | | + +### scanner.AuditData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | ------------------------------------------- | ---------------------- | :------: | ------- | ----------- | ------- | +| bySeverity | map of integer | `map[string]int64` | | | | | +| issueGroups | [][ScannerIssueGroup](#scanner-issue-group) | `[]*ScannerIssueGroup` | | | | | +| issueTotal | integer | `int64` | | | | | +| resourceTotal | integer | `int64` | | | | | + +### scanner.Issue + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------- | ------- | -------- | :------: | ------- | ------------------------------------------------------------------------------------- | ------- | +| message | string | `string` | | | Message provides a detailed human-readable description of the issue. | | +| scanner | string | `string` | | | Scanner is the name of the scanner that discovered the issue. | | +| severity | integer | `int64` | | | Severity indicates how critical the issue is, using the IssueSeverityLevel constants. | | +| title | string | `string` | | | Title is a brief summary of the issue. | | + +### scanner.IssueGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------------- | ----------------------------------------------- | ------------------------ | :------: | ------- | ----------- | ------- | +| issue | [ScannerIssue](#scanner-issue) | `ScannerIssue` | | | | | +| resourceGroups | [][EntityResourceGroup](#entity-resource-group) | `[]*EntityResourceGroup` | | | | | + +### unstructured.Unstructured + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------------------------- | ------------- | :------: | ------- | -------------------------------------------------------------------------------- | ------- | +| object | [interface{}](#interface) | `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or | | +| map[string]interface{} | | | | | | | +| children. | | | | | | | diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/3-search-methods.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/3-search-methods.md new file mode 100644 index 00000000..e1e63903 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/3-search-methods.md @@ -0,0 +1,109 @@ +--- +title: Search Methods +--- +Karpor is an open-source project that offers robust capabilities for searching resources across multiple clusters. 
This document outlines the two main search methods supported by Karpor: DSL (Domain Specific Language) and SQL (Structured Query Language), and explains how to utilize them for resource searches. + +## Keywords + +Karpor facilitates resource searches using two methods: DSL and SQL. Both methodologies leverage the following keywords for resource discovery: + +- cluster +- apiVersion +- kind +- namespace +- name +- creationTimestamp +- deletionTimestamp +- ownerReferences +- resourceVersion +- labels.`key` +- annotations.`key` +- content + +## SQL + +Karpor offers a SQL-like approach for querying Kubernetes resources, enabling users to employ SQL syntax for their searches. Below are examples illustrating the use of SQL syntax for various search scenarios: + +**Query resources of the Namespace kind** + +```sql +select * from resources where kind='Namespace' +``` + +**Query resources where the labels contain the key 'key1' with value 'value1'** + +```sql +select * from resources where labels.key1='value1' +``` + +**Query resources where the annotations contain the key 'key1' with value 'value1'** + +```sql +select * from resources where annotations.key1='value1' +``` + +**Query resources that are not of the Pod kind** + +```sql +select * from resources where kind!='Pod' +``` + +**Query resources of the Pod kind within a specific cluster** + +```sql +select * from resources where cluster='demo' and kind='Pod' +``` + +**Query resources of kind within a specified list** + +```sql +select * from resources where kind in ('pod','service') +``` + +**Query resources of kinds not within a specified list** + +```sql +select * from resources where kind not in ('pod','service') +``` + +**Query resources where the namespace starts with appl (where % represents any number of characters)** + +```sql +select * from resources where namespace like 'appl%' +``` + +**Query resources where the namespace contains banan (where \_ represents any single character)** + +```sql +select * from resources where namespace like 'banan_' +``` + +**Query resources where the namespace does not start with appl** + +```sql +select * from resources where namespace not like 'appl%' +``` + +**Query resources where the namespace does not contain banan** + +```sql +select * from resources where namespace notlike 'banan_' +``` + +**Query resources of kind Deployment and created before January 1, 2024, at 18:00:00** + +```sql +select * from resources where kind='Deployment' and creationTimestamp < '2024-01-01T18:00:00Z' +``` + +**Query resources of kind Service and order by creation timestamp in descending order** + +```sql +select * from resources where kind='Service' order by creationTimestamp desc +``` + +**Query resources whose content contains apple** + +```sql +select * from resources where contains(content, 'apple') +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/_category_.json b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/_category_.json new file mode 100644 index 00000000..1fd07096 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/5-references/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "References" +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/6-roadmap/README.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/6-roadmap/README.md new file mode 100644 index 00000000..bd58b0bc --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/version-v0.5/6-roadmap/README.md @@ -0,0 +1,18 
@@ +--- +title: 路线图 +--- +Karpor 是一个新兴的开源项目,我们致力于将其打造成一个小而美/厂商中立/开发者友好/社区驱动的开源项目🚀。未来,我们将重点放在以下几个领域: + +- 提升 Karpor 的**可用性**,降低入门门槛,确保其足够“用户友好”。 +- 加强 Karpor 的**可靠性**,确保其在生产环境中可信赖。 +- 深化与更多社区工具的**生态系统整合**,以确保开放性。 +- 探索 **AI + Karpor**,创造更多可能性。 +- 拥抱开源社区:我们热爱**开源精神**,如果你对开源感兴趣,那么从这里开始! +- ...... + +Karpor 遵循 [发布流程与节奏指南](../4-developer-guide/2-conventions/1-release-process.md),但行动可能不会严格遵守路线图。我们可能会根据社区会议的反馈和 [GitHub 问题](https://github.com/KusionStack/karpor/issues) 调整里程碑,期望所有社区成员加入讨论。关于最终决策,请参考 [GitHub 里程碑](https://github.com/KusionStack/karpor/milestones)。 + +以下是详细的路线图,我们将持续更新 ⬇️ + +- **2024 路线图**: [https://github.com/KusionStack/karpor/issues/273](https://github.com/KusionStack/karpor/issues/273) + diff --git a/i18n/zh/docusaurus-plugin-content-docs-operating/current.json b/i18n/zh/docusaurus-plugin-content-docs-operating/current.json new file mode 100644 index 00000000..39507407 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-operating/current.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "v0.5 🚧", + "description": "The label for version current" + }, + "sidebar.kuperator.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kuperator" + }, + "sidebar.kuperator.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kuperator" + }, + "sidebar.kuperator.category.Manuals": { + "message": "Manuals", + "description": "The label for category Manuals in sidebar kuperator" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-operating/version-v0.3.json b/i18n/zh/docusaurus-plugin-content-docs-operating/version-v0.3.json new file mode 100644 index 00000000..819a0506 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-operating/version-v0.3.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "v0.3", + "description": "The label for version v0.3" + }, + "sidebar.kuperator.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kuperator" + }, + "sidebar.kuperator.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kuperator" + }, + "sidebar.kuperator.category.Manuals": { + "message": "Manuals", + "description": "The label for category Manuals in sidebar kuperator" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs-operating/version-v0.4.json b/i18n/zh/docusaurus-plugin-content-docs-operating/version-v0.4.json new file mode 100644 index 00000000..ea53bd98 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs-operating/version-v0.4.json @@ -0,0 +1,18 @@ +{ + "version.label": { + "message": "v0.4", + "description": "The label for version v0.4" + }, + "sidebar.kuperator.category.Getting Started": { + "message": "Getting Started", + "description": "The label for category Getting Started in sidebar kuperator" + }, + "sidebar.kuperator.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar kuperator" + }, + "sidebar.kuperator.category.Manuals": { + "message": "Manuals", + "description": "The label for category Manuals in sidebar kuperator" + } +} diff --git a/i18n/zh/docusaurus-theme-classic/footer.json b/i18n/zh/docusaurus-theme-classic/footer.json new file mode 100644 index 00000000..7db819fc --- /dev/null +++ b/i18n/zh/docusaurus-theme-classic/footer.json @@ -0,0 +1,50 @@ +{ + "link.title.Document": { + "message": "Document", + "description": 
"The title of the footer links column with title=Document in the footer" + }, + "link.title.Resource": { + "message": "Resource", + "description": "The title of the footer links column with title=Resource in the footer" + }, + "link.title.More": { + "message": "More", + "description": "The title of the footer links column with title=More in the footer" + }, + "link.item.label.Kusion": { + "message": "Kusion", + "description": "The label of footer link with label=Kusion linking to /docs" + }, + "link.item.label.ControllerMesh": { + "message": "ControllerMesh", + "description": "The label of footer link with label=ControllerMesh linking to /ctrlmesh/intro/" + }, + "link.item.label.Karpor": { + "message": "Karpor", + "description": "The label of footer link with label=Karpor linking to /karpor/" + }, + "link.item.label.Blog": { + "message": "Blog", + "description": "The label of footer link with label=Blog linking to https://blog.kusionstack.io/" + }, + "link.item.label.Github": { + "message": "Github", + "description": "The label of footer link with label=Github linking to https://github.com/KusionStack" + }, + "link.item.label.Slack": { + "message": "Slack", + "description": "The label of footer link with label=Slack linking to https://join.slack.com/t/kusionstack/shared_invite/zt-19lqcc3a9-_kTNwagaT5qwBE~my5Lnxg" + }, + "link.item.label.KCL": { + "message": "KCL", + "description": "The label of footer link with label=KCL linking to https://kcl-lang.io" + }, + "copyright": { + "message": "Copyright © 2024 KusionStack Authors. The Linux Foundation has registered trademarks and uses trademarks. For a list of trademarks of The Linux Foundation, please see our Trademark Usage page.", + "description": "The footer copyright" + }, + "logo.alt": { + "message": "AntGroup Open Source Logo", + "description": "The alt text of footer logo" + } +} diff --git a/i18n/zh/docusaurus-theme-classic/navbar.json b/i18n/zh/docusaurus-theme-classic/navbar.json new file mode 100644 index 00000000..a31cd796 --- /dev/null +++ b/i18n/zh/docusaurus-theme-classic/navbar.json @@ -0,0 +1,34 @@ +{ + "title": { + "message": "KusionStack", + "description": "The title in the navbar" + }, + "logo.alt": { + "message": "KusionStack Icon", + "description": "The alt text of navbar logo" + }, + "item.label.Kusion": { + "message": "Kusion", + "description": "Navbar item with label Kusion" + }, + "item.label.Operating": { + "message": "Operating", + "description": "Navbar item with label Operating" + }, + "item.label.Ctrlmesh": { + "message": "Ctrlmesh", + "description": "Navbar item with label Ctrlmesh" + }, + "item.label.Karpor": { + "message": "Karpor", + "description": "Navbar item with label Karpor" + }, + "item.label.Community": { + "message": "Community", + "description": "Navbar item with label Community" + }, + "item.label.Blog": { + "message": "Blog", + "description": "Navbar item with label Blog" + } +} diff --git a/karpor_versioned_docs/version-v0.4/1-getting-started/1-overview.mdx b/karpor_versioned_docs/version-v0.4/1-getting-started/1-overview.mdx new file mode 100644 index 00000000..ee7a9a80 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/1-getting-started/1-overview.mdx @@ -0,0 +1,420 @@ +--- +id: overview +title: Overview +slug: / +--- + +import { + AiOutlineArrowRight, + AiFillCheckCircle, + AiFillCloseCircle, +} from "react-icons/ai"; +import logoImg from "@site/static/karpor/assets/logo/logo-full.png"; +import searchImg from "@site/static/karpor/assets/overview/search.png"; +import insightImg from 
"@site/static/karpor/assets/overview/insight.png"; +import visionImg from "@site/static/karpor/assets/overview/vision.png"; +import comingSoonImg from "@site/static/karpor/assets/misc/coming-soon.jpeg"; +import KarporButton from "@site/src/components/KarporButton"; +import GithubStar from "@site/src/components/GithubStars"; +import ReactPlayer from "react-player"; +import Typed from "typed.js"; +import clsx from "clsx"; + +export const Feature = ({ imgSrc, title, description, reverse }) => { + const reverseStyle = reverse ? { flexDirection: "row-reverse" } : {}; + return ( + <> +

{title}

+
+
+ +
+
+ {description} +
+
+ + ); +}; + +export const Content = () => { + const karporVsOthers = [ + { + label: "User Interface", + karpor: true, + kubernetesDashboard: true, + labelDesc: "", + }, + { + label: "Multi-Cluster", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Ability to connect to multiple clusters simultaneously", + }, + { + label: "Aggregated Resource View", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Human-friendly view for resources", + }, + { + label: "Security Compliance", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Automatic scanning risk, assessing health score", + }, + { + label: "Resource Relationship Topology", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Insight into the context of resources", + }, + ]; + const h2Style = { + paddingBottom: "14px", + borderBottom: "2px solid #f1f1f1", + fontSize: 28, + }; + const flexDirectionStyle = { + display: "flex", + flexDirection: "column", + alignItems: "center", + }; + // Setup typed animation + const el = React.useRef(null); + React.useEffect(() => { + const typed = new Typed(el.current, { + strings: [ + "Locate resources, for Developers.", + "Explore cluster insights, for Admins.", + "Connect multi-clusters, for Platforms.", + ], + typeSpeed: 40, + backDelay: 1500, + loop: true, + }); + return () => { + // Destroy Typed instance during cleanup to stop animation + typed.destroy(); + }; + }, []); + return ( + <> +
+
+ +
+
+ + +
+
+ Intelligence for Kubernetes ✨ +
+
+ +
+
+
+
+
+

📖 What is Karpor?

+
+ Karpor is Intelligence for Kubernetes. It brings advanced{" "} + 🔍 Search, 💡 Insight and ✨ AI to Kubernetes. It is + essentially a Kubernetes Visualization Tool. With Karpor, you can gain crucial + visibility into your Kubernetes clusters across any clouds. +
+
+ We hope to become a{" "} + + small and beautiful, vendor-neutral, developer-friendly, + community-driven + {" "} + open-source project! 🚀 +
+
+
+
+ +
+
+
+
+

💡 Why Karpor?

+
+ + ⚡️ Automatic Syncing +
+ Automatically synchronize your resources across any clusters + managed by the multi-cloud platform. +
+
+ 🔍 Powerful, flexible queries +
+ Effectively retrieve and locate resources across multi clusters + that you are looking for in a quick and easy way. + + } + /> +
+ + 🔒 Compliance Governance +
+ Understand your compliance status across multiple clusters and + compliance standards. +
+
+ 📊 Resource Topology +
+ Logical and topological views of relevant resources within their + operational context. +
+
+ 📉 Cost Optimization +
+ Coming soon. + + } + /> +
+ + 💬 Natural Language Operations +
+ Interact with Kubernetes using plain language for more + intuitive operations. +
+
+ 📦 Contextual AI Responses +
+ Get smart, contextual assistance that understands your needs. +
+
+ 🤖 AIOps for Kubernetes +
+ Automate and optimize Kubernetes management with AI-powered + insights. + + } + /> +
+
+
+
+
+

🌈 Our Vision

+
+ The increasing complexity of the kubernetes ecosystem is an + undeniable trend that is becoming more and more difficult to + manage. This complexity not only entails a heavier burden on + operations and maintenance but also slows down the adoption of + new technologies by users, limiting their ability to fully + leverage the potential of kubernetes. +
+
+ We wish Karpor to focus on 🔍 search, 📊 insights, + and ✨ AI, to break through the increasingly complex maze of + kubernetes, achieving the following value proposition: +
+
+
+ +
+
+
+

🙌 Karpor vs. Kubernetes Dashboard

+
+ + {karporVsOthers?.map((item) => { + return ( +
+
+
{item?.label}
+ {item?.labelDesc && ( +
{item?.labelDesc}
+ )} +
+
+ {item?.karpor ? ( + + ) : ( + + )} +
+
+ {item?.kubernetesDashboard ? ( + + ) : ( + + )} +
+
+ ); + })} +
+

🎖️ Open Source Contributors

+
+

Thanks all! 🍻

+

+ Follow{" "} + Contributing Guide, + come and join us! 👇 +

+ +
+

👉 Next Step

+
+ +
+ + ); +}; + + + diff --git a/karpor_versioned_docs/version-v0.4/1-getting-started/2-installation.md b/karpor_versioned_docs/version-v0.4/1-getting-started/2-installation.md new file mode 100644 index 00000000..432c7392 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/1-getting-started/2-installation.md @@ -0,0 +1,48 @@ +--- +title: Installation +--- +## Prerequisites + +* Ensure there is Kubernetes cluster available to install Karpor. For local installations, you can use Minikube or Kind. + +## Install with helm + +Karpor can be installed easily with helm v3.5+, which is a simple command-line tool and you can get it from [here](https://helm.sh/docs/intro/install/). + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +![Install](./assets/2-installation/install.gif) + +## Upgrade with helm + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update + +# Upgrade to the latest version. +helm upgrade karpor kusionstack/karpor + +# Upgrade to the specified version. +helm upgrade karpor kusionstack/karpor --version 1.2.3 +``` + +## Install/Upgrade locally with helm + +If you have problem connecting to [https://kusionstack.github.io/charts/](https://kusionstack.github.io/charts/) in production, you may need to manually download the chart from [here](https://github.com/KusionStack/charts) and use it to install or upgrade locally. + +```shell +git clone https://github.com/KusionStack/charts.git +helm install/upgrade karpor charts/karpor +``` + +## Uninstall + +To uninstall karpor: + +```shell +helm uninstall karpor +``` diff --git a/karpor_versioned_docs/version-v0.4/1-getting-started/3-quick-start.md b/karpor_versioned_docs/version-v0.4/1-getting-started/3-quick-start.md new file mode 100644 index 00000000..7b7a9b97 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/1-getting-started/3-quick-start.md @@ -0,0 +1,107 @@ +--- +title: Quick Start +--- +## Prerequisites + +* Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed. +* Ensure [helm](https://helm.sh/docs/intro/install/) is installed. +* If you do not have a ready-made cluster, you still need a [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation/). + +## Create Cluster (Optional) + +First, if you do not have a ready-made cluster, you need to create a kubernetes cluster in your local environment with the `kind` tool. Follow these steps: + +1. Create a cluster. You can create a cluster named `demo-cluster` using the following command: + ```shell + kind create cluster --name demo-cluster + ``` + + This will create a new Kubernetes cluster in your local Docker environment. Wait for a moment until the cluster creation is complete. +2. Verify that the cluster is running properly by executing the command: + ```shell + kubectl cluster-info + ``` + + If everything is set up correctly, you'll see information about your Kubernetes cluster. + +## Installation + +To install Karpor, execute the following command in your terminal: + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +For more installation details, please refer to the [Installation Documentation](2-installation.md). + +![Install](./assets/2-installation/install.gif) + +## Access Karpor Dashboard + +1. 
Run the following command to forward the Karpor server port: + ```shell + kubectl -n karpor port-forward service/karpor-server 7443:7443 + ``` + + This will create a port forward from your local machine to the Karpor server. +2. Open your browser and enter the following URL: + ```shell + https://127.0.0.1:7443 + ``` + +This will take you to the karpor dashboard. 👇 + +![Open in Browser](./assets/2-installation/open-in-browser.gif) + +Congratulations! 🎉 You have successfully installed Karpor. Now you can start using Karpor for multi-cluster search and insights. + +## Register Cluster + +To register a new cluster with Karpor, follow these steps: + +1. Navigate to the `Cluster Management` section in the Karpor UI. +2. Click on the `Register Cluster` button. +3. Follow the on-screen instructions to complete the registration process. + +An example of the registration button can be found in the image below: + +![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) + +For a more detailed explanation of the registration process, refer to the [Multi-cluster management](../3-user-guide/1-multi-cluster-management.md) Documentation. + +## Search Resources + +Karpor provides a powerful search feature that allows you to quickly find resources across the registered clusters. To use this feature: + +1. Go to the `Search` page within the Karpor UI. +2. Enter the search criteria for the resources you are looking for. + +Here is an example of the `Search` page: + +![](/karpor/assets/search/search-auto-complete.png) +![](/karpor/assets/search/search-result.png) + +To learn more about the search capabilities and how to use them effectively, check out the [Search Methods Documentation](../5-references/3-search-methods.md). + +## Gain Insight into Resources + +By clicking on a result from your search, you can delve into the `Insight` page, where you'll be able to investigate risks related to the resource, see a topological view with its relevant resources, and examine its detailed information. + +Here are examples for what you can find on the Insight page: + +![](/karpor/assets/insight/insight-home.png) +![](/karpor/assets/insight/insight-single-issue.png) +![](/karpor/assets/insight/insight-topology.png) + +## Conclusion + +Please note that this guide only provides a quick start for Karpor, and you may need to refer to additional documentations and resources to configure and use other features. + +If you have any questions or concerns, check out the official documentation of Karpor or seek relevant support. + +## Next Step + +- Learn Karpor's [Architecture](../concepts/architecture) and [Glossary](../concepts/glossary). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. 
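+
+(Optional) If the `demo-cluster` created at the beginning of this guide was only used to try Karpor out, you can tear everything down afterwards. The following is a minimal clean-up sketch that simply reuses the release and cluster names from the examples above:
+
+```shell
+# Assumes the default release name "karpor" and the kind cluster "demo-cluster" used in this guide.
+helm uninstall karpor
+kind delete cluster --name demo-cluster
+```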
diff --git a/karpor_versioned_docs/version-v0.4/1-getting-started/_category_.json b/karpor_versioned_docs/version-v0.4/1-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/1-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/karpor_versioned_docs/version-v0.4/1-getting-started/assets/2-installation/install.gif b/karpor_versioned_docs/version-v0.4/1-getting-started/assets/2-installation/install.gif new file mode 100644 index 00000000..68889793 Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/1-getting-started/assets/2-installation/install.gif differ diff --git a/karpor_versioned_docs/version-v0.4/1-getting-started/assets/2-installation/open-in-browser.gif b/karpor_versioned_docs/version-v0.4/1-getting-started/assets/2-installation/open-in-browser.gif new file mode 100644 index 00000000..00adfb18 Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/1-getting-started/assets/2-installation/open-in-browser.gif differ diff --git a/karpor_versioned_docs/version-v0.4/2-concepts/1-architecture.md b/karpor_versioned_docs/version-v0.4/2-concepts/1-architecture.md new file mode 100644 index 00000000..c53e8491 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/2-concepts/1-architecture.md @@ -0,0 +1,24 @@ +--- +title: Architecture +--- +![](assets/1-architecture/architecture.png) + +## Components + +- `Dashboard`: Web UI for Karpor. +- `Server`: Main Backend Server for Karpor. +- `Syncer`: Independent Server to synchronize cluster resources in real-time. +- `Storage`: Storage Backend to store the synchronized resources and user data. + +## How Karpor Works + +1. After installation, users can register clusters of interest into Karpor. +2. The Syncer runs and automatically synchronizes the resources of interest from the cluster to Storage. It also ensures the real-time changes to the resources are automatically sync-ed to Karpor Storage. +3. When a user wishes to locate specific resource(s), a search query can be typed into the search box in the Dashboard. The Dashboard interacts with the search endpoint of the Server. The search module within the Server parses the search query, searches for relevant resources in Storage, and returns the results to the Dashboard. +4. Upon clicking a search result, the user is directed to a resource insight page. The Dashboard calls the insight endpoint of the Server, where the Server's insight module performs a static scan of the resource, generates issue reports, and locates its relevant resources to draw a resource topology map with all of its parents and children. +5. The insight page also applies to groups of resources, such as all resources in a cluster, a Group-Version-Kind combination, a namespace or a custom-defined resource group. + +## Next Step + +- Learn Karpor's [Glossary](../concepts/glossary). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. diff --git a/karpor_versioned_docs/version-v0.4/2-concepts/3-glossary.md b/karpor_versioned_docs/version-v0.4/2-concepts/3-glossary.md new file mode 100644 index 00000000..d7c52cef --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/2-concepts/3-glossary.md @@ -0,0 +1,49 @@ +--- +title: Glossary +--- +## Cluster + +Equivalent to the concept of a cluster in `Kubernetes`, such as a cluster named `democluster`. 
+ +`Karpor` can manage multiple clusters, including cluster registration, certificate rotation, generating and viewing insights, and other operations through a Dashboard. It also supports accessing any managed cluster using a unified certificate issued by `Karpor` through command-line tools such as `kubectl` and `kubectx`. + +For more details, please refer to the best practice: [One Pass with Proxy](../3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md). + +## Resource + +Equivalent to the resource concept in `Kubernetes`, such as a `Deployment` named `mockDeployment`. + +`Karpor` performs real-time synchronization, search, and insights on resources within the managed clusters. A resource is the object with the smallest granularity for searching and insights in `Karpor`. + +## Resource Group + +**A resource group is a logical organizational structure** used to combine related `Kubernetes` resources for a more intuitive view, search, and insight experience. For example, an `Application` named `mockapp` resource group can be created to includes a `Namespace`, a `Deployment`, and multiple `Pods`, all with a specific label such as `app.kubernetes.io/name: mockapp`. + +## Resource Group Rule + +**A resource group rule is a set of conditions** that groups specific resources into appropriate resource groups. These rules aim to organize resources into logical units based on properties such as `annotations`, `labels`, `namespace`, and so on. For example, to define an Application resource group rule, you can specify the `app.kubernetes.io/name` annotation as a grouping condition. + +`Karpor` has a preset resource group rule - `Namespace` - as well as custom resource group rules. + +![](assets/3-glossary/image-20240326171327110.png) + +## Topology + +In `Karpor`, the topology refers to the **relations and dependencies between relevant resources within a given resource group**. Viewing and understanding the interior structure of a resource group is made easier with a visual topology diagram, which is helpful for troubleshooting and locating issues. + +## Audit + +Audit refers to **performing a compliance scan on all resources within a given resource group**. The goal is to help users discover potential risks. The scanning tools and rules used for the audit are currently built into the system, but we will support customization in the future. + +## Issue + +**The output of the audit is referred to as issues**. If there are no problems with the scanned object, the audit results will be empty. Otherwise, all identified risks will be categorized by their risk level and displayed, including descriptions of each risk, associated resources, etc., guiding users to fix the issues, ensure the security and compliance of the cluster resources. + +## Score + +The score is used to reflect the **overall health status of a resource group or a resource**, reminding users to take timely adjustments and measures. The health score is calculated based on the resource group's audit results. The factors that impact the score include: **risk level**, **number of risks**, and **total number of resources**. + +## Next Step + +- Learn Karpor's [Architecture](../concepts/architecture). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. 
diff --git a/karpor_versioned_docs/version-v0.4/2-concepts/_category_.json b/karpor_versioned_docs/version-v0.4/2-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/2-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/karpor_versioned_docs/version-v0.4/2-concepts/assets/1-architecture/architecture.png b/karpor_versioned_docs/version-v0.4/2-concepts/assets/1-architecture/architecture.png new file mode 100644 index 00000000..afec9346 Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/2-concepts/assets/1-architecture/architecture.png differ diff --git a/karpor_versioned_docs/version-v0.4/2-concepts/assets/3-glossary/image-20240326171327110.png b/karpor_versioned_docs/version-v0.4/2-concepts/assets/3-glossary/image-20240326171327110.png new file mode 100644 index 00000000..f5673eb8 Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/2-concepts/assets/3-glossary/image-20240326171327110.png differ diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/1-multi-cluster-management.md b/karpor_versioned_docs/version-v0.4/3-user-guide/1-multi-cluster-management.md new file mode 100644 index 00000000..d7c5cc9a --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/1-multi-cluster-management.md @@ -0,0 +1,33 @@ +--- +title: Multi-Cluster Management +--- +Multi-cluster management is the entrance to register clusters into Karpor, enabling search and insight capabilities across a large number of clusters. + +## Register Cluster + +1. Click the Cluster Management Tab. +2. Click the Register Cluster button. + ![](/karpor/assets/cluster-mng/cluster-mng-empty.png) +3. Add the cluster name. The cluster name must be unique and CANNOT be altered once created. +4. Upload the cluster's KubeConfig file. One with read permission is sufficient. +5. Click the Verify and Submit button. + ![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) +6. Once verified, the cluster will be added under the Cluster Management page + ![](/karpor/assets/cluster-mng/cluster-mng-register-success.png) + +## Edit Cluster + +The Edit button allows for modifications to the Display Name and Description, thus altering how the cluster's name and description appear on the Dashboard. +![](/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png) + +## Rotate Certificate + +When the KubeConfig expires, you can update the certificate by clicking Rotate Certificate. +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png) + +## Remove Cluster + +The delete button facilitates the removal of a registered cluster. +![](/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png) diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/2-search.md b/karpor_versioned_docs/version-v0.4/3-user-guide/2-search.md new file mode 100644 index 00000000..f72693de --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/2-search.md @@ -0,0 +1,34 @@ +--- +title: How to Search +--- +Within this section, we will explore how to perform multi-cluster resource searches using Karpor, with this guide being done entirely through the Dashboard. + +We support three methods of search: + +- **Search by SQL**: Perform resource searches using SQL query language. +- **Search by DSL**: Conduct resource searches through `Karpor`'s Domain Specific Language (DSL). 
+- **Search by Natural Language**: Using natural language for resource search. + +## Search by SQL + +Karpor offers a nifty SQL query feature that allows you to search and filter all Kubernetes resources within managed clusters using familiar SQL syntax and provides targeted optimizations and enhancements for multi-cluster resource searches. + +SQL is one of the easily accessible skills for practitioners in the software engineering industry, theoretically making the learning curve quite low. As such, this search method is prepared for you! It is particularly well-suited for beginners to Karpor. + +Below are the steps to use Search by SQL: + +1. **Enter the Search page**: We designed the homepage as the entry point for search, so opening `Karpor`'s Web UI immediately presents you with the search page. + ![](/karpor/assets/search/search-home.png) +2. **Compose SQL query statements**: Write your query statement using SQL syntax, specifying the cluster name, resource type, conditions, and filters you wish to search for. Additionally, if you enter a keyword and press a space, the search box will pop up with a dropdown with auto-completion, suggesting possible keywords you can type next. + ![](/karpor/assets/search/search-auto-complete.png) +3. **Execute the query**: Click the 'search' button to execute the query and be sent to the search results page. Karpor will return a list of resources that match the SQL query. + ![](/karpor/assets/search/search-result.png) +4. **Advanced features**: Utilize our built-in advanced SQL syntax, such as sorting, full-text search, etc., to refine your search further. For details, please refer to: [Search Methodology Documentation](../5-references/3-search-methods.md). + +## Search by DSL + +Coming soon. 🚧 + +## Search by Natural Language + +Coming soon. 🚧 diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/1-inspecting-any-resource-group-and-resource.md b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/1-inspecting-any-resource-group-and-resource.md new file mode 100644 index 00000000..6632f8e9 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/1-inspecting-any-resource-group-and-resource.md @@ -0,0 +1,27 @@ +--- +title: Inspecting Any Resource Group and Resource +--- +In this part, we will explain in detail through clear steps and examples how to use Karpor to inspect any resource group or resource. + +If you are not familiar with relevant concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +## Inspecting Specific Resources + +1. Search for the resource you are interested in: + ![](/karpor/assets/search/search-home.png) +2. On the search results page, all resources filtered by the criteria will be listed: + ![](/karpor/assets/search/search-result.png) +3. Click on any resource name to jump to that resource's insight page: + ![](/karpor/assets/insight/insight-home.png) + +## Inspecting Specific Resource Groups + +You may notice that in each search result entry, tags for `Cluster`, `Kind`, `Namespace`, etc., of the resource are listed. Please note that these tags are **hyperlinks**, which we refer to as "**anchor points**". These represent the links to a particular resource group or a resource. By clicking on these **anchor points**, you can quickly jump to the insight page of that resource group or resource. 
+ +![](/karpor/assets/search/search-result.png) + +## Flexible Switching Between Resource Groups/Resources + +In fact, besides the tags in the mentioned search results, any resource/resource group names you see on any page can be re-directed to as **anchor points**, which serve like space-time wormholes, allowing you to traverse back and forth through any dimension until you find the resources you are searching for. Both search and anchor points are means to expedite the retrieval, which are key features of Karpor as a Kubernetes Explorer. + +![](/karpor/assets/insight/insight-breadcrumbs.png) diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/2-custom-resource-group.md b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/2-custom-resource-group.md new file mode 100644 index 00000000..2f22fb79 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/2-custom-resource-group.md @@ -0,0 +1,92 @@ +--- +title: Custom Resource Group +--- +## Creating Custom Resource Group + +This section will focus on how to create custom resource group within Karpor. Through custom resource group, you can flexibly manage and organize resources in Karpor according to your own needs and logical concepts. We will guide you step by step to create and define custom resource group and show you how to use these groups for resource insight and management. + +If you're not familiar with **Resource Group** and **Resource Group Rule** related concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +**Let's assume** that within your organization or company, there is a concept of `application unit` that represent **all resources of an application in a certain environment**. + +We mark the **name and environment of the application in the label**. For example, the following is the `application unit` of `mock-apple` in the `production environment`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: mock-apple + name: mock-apple +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + template: + metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + fruit: apple + spec: + containers: + - image: nginx:latest + name: mock-container + dnsPolicy: ClusterFirst + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + name: mock-service-apple-prod + namespace: mock-apple +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + type: ClusterIP +``` + +Now, we will create a custom `resource group rule` called the `application unit` by following the steps below. It will classify all resources in the cluster according to the rules specified by the user and list all `resource groups` that comply with the rules. + +1. Click on the Insight tab to enter the insight homepage. +2. At the bottom of the page, you will see a default resource group rule `namespace`, which is a single rule classified by a namespace. + ![](/karpor/assets/insight/insight-homepage.png) +3. 
Click on the create button + of the resource group and fill in the **basic information and classification rules** of the `application unit` in the pop-up window. + ![](/karpor/assets/insight/insight-create-app-resource-group-rule.png) +4. Click on the Submit button, then click on the newly appearing application unit tab to list all application units. + ![](/karpor/assets/insight/insight-list-app-resource-groups.png) +5. You can enter keywords in the search box to quickly find the `application unit` of `mock-apple` in `production`. + ![](/karpor/assets/insight/insight-search-app-resource-group.png) +6. You can click the View button on a resource group card to jump to the corresponding `resource group insight page` and view aggregated information such as all resources, topology relationships, compliance reports, etc. of a certain application unit. +7. If necessary, you can also use the same steps to create an `environment resource group`. + ![](/karpor/assets/insight/insight-create-env-resource-group-rule.png) + ![](/karpor/assets/insight/insight-list-env-resource-groups.png) + +## Edit Custom Resource Group + +You can click the button on the right side of the custom resource group tab to modify basic information and classification rules in the pop-up window. + +![](/karpor/assets/insight/insight-edit-env-resource-group.png) + +## Delete Custom Resource Group + +You can click the button on the right side of the custom resource group tab, then click on the Delete to delete current resource group rule in the pop-up window. + +![](/karpor/assets/insight/insight-delete-env-resource-group.png) diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/3-summary.md b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/3-summary.md new file mode 100644 index 00000000..1f3970f7 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/3-summary.md @@ -0,0 +1,22 @@ +--- +title: Summary +--- +In this section, we will learn about the `summary card` on the Karpor insight page, which are used to quickly view and understand key metrics for the current resource group or resource. + +Under different resource groups, the content displayed by the `summary card` may also vary. + +If you are on: + +1. **Resource Group Insight Page**: + 1. **Cluster Insight Page**, the summary card shows the **Node, Pod numbers, CPU, memory capacity, and Kubernetes version of the cluster**. + ![](/karpor/assets/insight/insight-summary-cluster.png) + 2. **Resource Kind Insight Page**, the summary card shows the **affiliated cluster, GVK (Group Version Kind) information, and the number of that type of resource under the current cluster**. + ![](/karpor/assets/insight/insight-summary-kind.png) + 3. **Namespace Insight Page**, the summary card shows the **affiliated cluster, namespace, and the most abundant resource types under the current namespace.** + ![](/karpor/assets/insight/insight-summary-namespace.png) + 4. **Custom Resource Group Insight Page**, the summary card shows the **key-value of each rule, and several resource statistics under the current resource group.** + ![](/karpor/assets/insight/insight-summary-custom-resource-group.png) +2. 
**Resource Insight Page**, the summary card shows the **current resource's name, GVK information, affiliated cluster, and namespace.** + ![](/karpor/assets/insight/insight-summary-resource.png) + +⚠️ **Attention**: No matter which resource group insight page you are on, the summary card will always display a health score, calculated based on the risk compliance status of the subject. diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/4-compliance-report.md b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/4-compliance-report.md new file mode 100644 index 00000000..1c714804 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/4-compliance-report.md @@ -0,0 +1,16 @@ +--- +title: Compliance Report +--- +This section will introduce the compliance scan feature, primarily used to detect and assess whether all resources in the current resource or resource group comply with specific compliance standards and security policies. In this section, you will understand how to effectively utilize the compliance scan feature to ensure the security and compliance of the cluster and resources. + +If you're not familiar with **Compliance Report** or **Risk** related concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +1. Follow the guidance on [Inspecting Any Resource Group and Resource](#inspecting-any-resource-group-and-resource) and resource to navigate to the insights page of a particular resource group/resource. +2. You can see the **Compliance Report** card of the resource. + ![](/karpor/assets/insight/insight-home.png) +3. This card displays the **Risk** identified during the scan of the current resource or all the resources under the resource group, categorized by risk level. Under each risk level tag, risks are sorted from highest to lowest occurrence. Each risk entry shows the title, description, number of occurrences, and the scanning tool that discovered the issue. +4. Clicking on a specific risk will display a popup with the details of the risk. + ![](/karpor/assets/insight/insight-single-issue.png) +5. Click on View All Risks, and a drawer will pop out listing all the risks. Here, you can search, categorize, paginate, etc + ![](/karpor/assets/insight/insight-all-issues.png) +6. Once you have resolved a risk following its indications, you can click the [Rescan] button, which will trigger a comprehensive compliance scan of all resources under the resource group. The Dashboard will display the new results once the scan is completed. diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/5-topology.md b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/5-topology.md new file mode 100644 index 00000000..73a4ef79 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/5-topology.md @@ -0,0 +1,19 @@ +--- +title: Topology +--- +## Topology + +In this section, we will explore the topology feature in Karpor. The topology view will help you more intuitively understand the relationships and dependencies among various resources in your cluster. Here's how to use the topology view. + +1. Follow the guidance on [Inspecting Any Resource Group and Resource](#inspecting-any-resource-group-and-resource) to navigate to the insights page of a particular resource group/resource. +2. At the bottom of the page, you can see the resource topology map. + ![](/karpor/assets/insight/insight-topology.png) +3. Depending on the current page: + 1. Resource Insights Page: + 1. 
The map will display relevant upstream and downstream resources related to the current resource. For example, if the current resource is a Deployment, the topology map will show the ReplicaSet under the Deployment and the Pods under the ReplicaSet. + ![](/karpor/assets/insight/insight-topology-example.png) + 2. Clicking on a node in the resource topology map is equivalent to clicking on an anchor of a specific resource, which will directly navigate to the insights page of that resource. + 2. Resource Group Insights Page: + 1. The map will intuitively show the quantity and relationship of all types of resources under the current resource group. + 2. Clicking on a node in the resource topology map is equivalent to clicking on a resource type, and the list below will refresh with all the resources under a specific type within the current resource group. + ![](/karpor/assets/insight/insight-linkage.png) diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/_category_.json b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/_category_.json new file mode 100644 index 00000000..c39e5397 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "How to Insight" +} diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/index.md b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/index.md new file mode 100644 index 00000000..0bdb110d --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/3-insight/index.md @@ -0,0 +1,6 @@ +--- +title: How to Insight +--- +In this section, we will introduce how to gain comprehensive insights into the resources within a cluster using Karpor. You can access the Insight page in various ways and easily toggle between insight pages for different scopes (such as Cluster, Kind, Namespace or individual Resource). If there are domain-specific logical scopes within your current organization, you can even customize resource groups (such as Application, Environment, etc.) by setting resource group rules. We also provide functionality to gain insights into these custom resource groups. + +This guide will be entirely operated from the Karpor Dashboard. diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md new file mode 100644 index 00000000..cb8cb78f --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/1-one-pass-with-proxy.md @@ -0,0 +1,42 @@ +--- +title: One-Pass with Proxy +--- +## Challenges and Demands + +### The Scale of Multicluster at a Grand Scale + +In June 2014, Kubernetes, born from Google's internal Borg project, made a striking debut. Endorsed by tech giants and aided by a thriving open-source community, it gradually became the de facto standard in the container orchestration field. As companies began to deploy Kubernetes in production environments, a single Kubernetes cluster could no longer meet the increasingly complex demands internally. It's common for the number of nodes in a single cluster to exceed the community-recommended limit (5,000), making the expansion into a multicluster configuration a natural choice. + +### The Basic Needs from Multicluster Accessors + +With the thriving development of multiclusters, various platforms may need to access resources across different clusters, requiring access to each cluster's KubeConfig. 
+ +As the number of users and clusters increases, cluster administrators face significant time costs: If there are `M` clusters and `N` users, the time complexity for managing KubeConfig becomes `O(M*N)`. Moreover, users need to switch between different KubeConfigs when accessing different clusters, and the corresponding permissions for KubeConfigs vary across clusters, undoubtedly adding to the complexity of use. + +![Direct Connection: Users need to maintain multiple KubeConfigs](assets/1-one-pass-with-proxy/image-20240326163622363.png) + +Under these circumstances, is there a method to conveniently access resources in different clusters without maintaining a large number of KubeConfigs and managing various users' permissions across clusters? Moreover, this method should ideally be cloud-native, accessible through kubectl and Kubernetes' official client, to reduce the cost of transitioning to this method. The emergence of `Karpor` is to solve these problems. + +## The Idea of A "One-Pass Access" + +We developed `Karpor`, an open-source project. While serving as a Kubernetes Explorer with unique advantages in searching and insight into cluster resources, its foundational multicluster management component, featuring cluster certificate issuance and multicluster request proxying, makes it highly suitable as a unified access point for platforms to multiple clusters. This component supports forwarding user requests to designated clusters in a cloud-native manner, allowing users to maintain a single set of KubeConfigs to access different clusters, making multicluster access as simple as accessing a single cluster. So, how does it work? Below, we introduce `Karpor`'s architecture and functionality. + +![Using Multi-cluster Gateway: Users only need to maintain a single set of KubeConfigs](assets/1-one-pass-with-proxy/image-20240326164141400.png) + +### Multi-cluster Request Routing and Proxy + +`Karpor` includes an application layer gateway capable of forwarding any Kubernetes-style request to a specified Kubernetes cluster. `Karpor` is also developed based on the Kubernetes framework as a kube-apiserver, which can operate independently or as an extension to an existing kube-apiserver. `Karpor` supports handling two types of extended resources: `Cluster` and `Cluster/Proxy`, the former for storing cluster information and the latter for forwarding user requests to a specific cluster. Users can access through the Kubernetes official CLI (`kubectl`) or SDK (`client-go`, `client-java`, etc.). + +`Karpor` proxies all access to `Cluster/Proxy` subresources to the target cluster. For example, to retrieve Pod information from the `Cluster1` cluster, users need to send the `GET /apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods` request to `Karpor`. `Karpor` will generate a KubeConfig from the `Cluster/Cluster1` resource for accessing the cluster and proxy the `/api/v1/pods` request to the `Cluster1` cluster. + +![Accessing any managed cluster with kubectl & karpor certificate](assets/1-one-pass-with-proxy/image-20240326165247891.png) + +### Supporting All Kubernetes Native Requests + +`Karpor` supports forwarding all kube-apiserver requests. Specifically, `Karpor` is an application layer gateway that proxies HTTP requests through the HTTP connect protocol. In addition to supporting `get`, `create`, `update`, and `delete` operations on resources, it also supports `watch`, `log`, `exec`, `attach`, etc. 
(Since the SPDY protocol used for `exec`, and `attach` does not support http2, `Karpor` will disable http2 when forwarding these requests, switching to http1.1 and supporting hijacker processing). + +![](assets/1-one-pass-with-proxy/image-20240326165632158.png) + +## Summary + +As can be gleaned from the text above, utilizing `Karpor`'s multi-cluster management component enables the issuance of a "multi-cluster pass" with controllable permissions for users. Users no longer need to concern themselves with issues such as frequent cluster certificate switching and onboarding of new clusters. With this "one-pass access", the cost of accessing multiple clusters is reduced, fulfilling the most fundamental needs of most users on multi-cluster platforms. diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/_category_.json b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/_category_.json new file mode 100644 index 00000000..82dd90e3 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Best Production Practices" +} diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png new file mode 100644 index 00000000..ab8051fe Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png differ diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png new file mode 100644 index 00000000..de950079 Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png differ diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png new file mode 100644 index 00000000..27fffb47 Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png differ diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png new file mode 100644 index 00000000..99053c68 Binary files /dev/null and b/karpor_versioned_docs/version-v0.4/3-user-guide/4-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png differ diff --git a/karpor_versioned_docs/version-v0.4/3-user-guide/_category_.json b/karpor_versioned_docs/version-v0.4/3-user-guide/_category_.json new file mode 100644 index 00000000..8f01ba26 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/3-user-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guide" +} diff --git 
a/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/1-non-code-contribute.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/1-non-code-contribute.md new file mode 100644 index 00000000..721dbf92 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/1-non-code-contribute.md @@ -0,0 +1,40 @@ +--- +title: Non-code Contribution Guide +--- +You can contribute in any of the following ways that interest you. + +## Contributing Use Cases and Demos + +* If you are using Karpor, the simplest way to contribute is to [express gratitude to the community](https://github.com/KusionStack/karpor/issues/343). + +## Reporting Bugs + +Before submitting a new issue, please make sure that no one has already reported the problem. + +Check the [Issue list](https://github.com/KusionStack/karpor/issues) for any similar issues. + +[Report bugs](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug-report.yaml) by submitting a Bug report, ensuring you provide as much information as possible to help reproduce the Bug. + +Follow the issue template and add additional information to help us replicate the issue. + +## Security Issues + +If you believe you have discovered a security vulnerability, please read our [security policy](https://github.com/KusionStack/karpor/blob/main/SECURITY.md) for more detailed information. + +## Suggesting Enhancements + +If you have ideas to improve Karpor, please submit a [feature request](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Ffeature&projects=&template=enhancement.yaml). + +## Answering Questions + +If you have a question and cannot find the answer in the [documentation](https://www.kusionstack.io/karpor/), the next step is to ask on [GitHub Discussions](https://github.com/KusionStack/karpor/discussions). + +Helping these users is important to us, and we would love to have your help. You can contribute by answering [their questions](https://github.com/KusionStack/karpor/discussions) to help other Karpor users. + +## Contributing Documentation + +Contributing to the documentation requires some knowledge on how to submit a pull request to Github, which I think won't be difficult if you follow the guide. + +* [kusionstack.io Developer's Guide](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +For more ways to contribute, please look at the [Open Source Guide](https://opensource.guide/how-to-contribute/). diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/2-code-contribute.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/2-code-contribute.md new file mode 100644 index 00000000..e00a4374 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/2-code-contribute.md @@ -0,0 +1,174 @@ +--- +title: Code Contribution Guide +--- +In this code contribution guide, you will learn about the following: + +- [How to run Karpor locally](#running-karpor-locally) +- [How to create a pull request](#creating-a-pull-request) +- [Code review guidelines](#code-review) +- [Formatting guidelines for pull requests](#formatting-guidelines) +- [Updating Documentation and Website](#updating-documentation-and-website) + +## Running Karpor Locally + +This guide will help you get started with Karpor development. + +### Prerequisites + +* Golang version 1.19+ + +
+ Installing Golang + +1. Install go1.19+ from the [official website](https://go.dev/dl/). Extract the binary files and place them at a location, assuming it is located under the home directory `~/go/`, here is an example command, you should choose the correct binary file for your system. + +``` +wget https://go.dev/dl/go1.20.2.linux-amd64.tar.gz +tar xzf go1.20.2.linux-amd64.tar.gz +``` + +If you would like to maintain multiple versions of golang in your local development environment, you can download the package and extract it to a location, like `~/go/go1.19.1`, and then alter the path in the command below accordingly. + +1. Set environment variables for Golang + +``` +export PATH=~/go/bin/:$PATH +export GOROOT=~/go/ +export GOPATH=~/gopath/ +``` + +If the `gopath` folder does not exist, create it with `mkdir ~/gopath`. These commands will add the go binary folder to the `PATH` environment variable (making it the primary choice for go) and set the `GOROOT` environment to this go folder. Please add these lines to your `~/.bashrc` or `~/.zshrc` file, so you won't need to set these environment variables every time you open a new terminal. + +1. (Optional) Some regions, such as China, may have slow connection to the default go registry; you can configure GOPROXY to speed up the download process. + +``` +go env -w GOPROXY=https://goproxy.cn,direct +``` + +
+ +* Kubernetes version v1.20+ configured with `~/.kube/config`. +* golangci-lint version v1.52.2+. It will be installed automatically when you run `make lint`; if the installation fails, you can install it manually. + +
+ Manually installing golangci-lint + +You can install it manually following the [guide](https://golangci-lint.run/welcome/install), or use the command: + +``` +cd ~/go/ && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.52.2 +``` + +
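+
+Before moving on to the build, you may want to double-check that the toolchain is picked up correctly. The following is a quick, optional sanity check; the expected versions simply mirror the prerequisites listed above:
+
+```shell
+# Confirm the expected tool versions are resolved from your PATH.
+go version                # should report go1.19 or newer
+golangci-lint version     # should report v1.52.2 or newer
+kubectl version --client  # kubectl itself; the cluster configured in ~/.kube/config should be v1.20+
+```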
+ +### Building + +- Clone this project + +```shell +git clone git@github.com:KusionStack/karpor.git +``` + +- Build locally + +Executing `make build-all` will build the executables for all platforms; if you only want to build for a specific platform, execute `make build-${PlatformName}`, e.g., `make build-darwin`. To see all available commands, execute `make help`. + +### Testing + +It's essential to write tests to maintain code quality, you can run all unit tests by executing the following command in the project root directory: + +```shell +make test +``` + +If you need to generate extra coverage report files, execute: + +```shell +make cover +``` + +Then you can view the content of the coverage report in a browser by running: + +```shell +make cover-html +``` + +## Creating a Pull Request + +We are thrilled that you are considering contributing to the Karpor project! + +This document will guide you through the process of [creating a pull request](./index.md#contribute-a-pull-request). + +### Before you begin + +We know you are excited to create your first pull request. Before we get started, make sure your code follows the relevant [code conventions](../2-conventions/2-code-conventions.md). + +### Your First Pull Request + +Before submitting your PR, run the following commands to ensure they all succeed: + +``` +make test +make lint +``` + +If this is your first time contributing to an open-source project on GitHub, please make sure to read the instructions on [creating a pull request](https://help.github.com/en/articles/creating-a-pull-request). + +To increase the chances of your pull request being accepted, please ensure your pull request follows these guidelines: + +- The title and description match the implementation. +- The commits in the pull request follow the [formatting guidelines](#Formatting-guidelines). +- The pull request closes a related issue. +- The pull request includes necessary tests to verify the expected behavior. +- If your pull request has conflicts, please rebase your branch onto the main branch. + +If the pull request fixes a bug: + +- The pull request description must contain `Closes #` or `Fixes #`. +- To prevent regressions, the pull request should include tests that replicate the bug being fixed. + +## Code Review + +Once you have created a pull request, the next step is to have others review your changes. Review is a learning opportunity for both reviewers and the author of the pull request. + +If you believe a specific person should review your pull request, you can tag them in the description or a comment. +Tag a user by typing an `@` symbol followed by their username. + +We recommend that you read [How to do a code review](https://google.github.io/eng-practices/review/reviewer/) to learn more about code reviews. + +## Formatting Guidelines + +A well-crafted pull request can minimize the time to get your changes accepted. These guidelines will help you write well-formulated commit messages and descriptions for your pull requests. + +### Commit Message Format + +More see: [Commit Conventions](../2-conventions/4-commit-conventions.md) + +### Pull Request Title + +When accepting pull requests, the Karpor team merges all commits into one. + +The pull request title becomes the subject line of the merged commit message. + +We still encourage contributors to write informative commit messages, as they will be part of the Git commit body. + +We use the pull request titles when generating change logs for releases. 
Hence, we strive to make the titles as informative as possible. + +Make sure your pull request title uses the same format as the commit message subject line. If the format is not followed, we will add a `title-needs-formatting` label on the pull request. + +### Passing All CI Checks + +Before merging, all testing CIs should pass: + +- Coverage should not drop. Currently, the pull request coverage should be at least 70%. +- Karpor uses a **CLA** for the contributor agreement. It requires you to sign for every commit before merging the pull request. + +## Updating Documentation and Website + +If your pull request has been merged, and it is a new feature or enhancement, you need to update the documentation and send a pull request to the [kusionstack.io](https://github.com/KusionStack/kusionstack.io) repository. + +Learn how to write documentation through the following guide: + +- [kusionstack.io Developer Guide](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +Awesome, you've completed the lifecycle of code contribution! diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/3-roles.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/3-roles.md new file mode 100644 index 00000000..e9193204 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/3-roles.md @@ -0,0 +1,41 @@ +--- +title: Roles +--- +Thank you for your interest and support for karpor! + +This document outlines the roles and responsibilities of contributors in the project, as well as the process for becoming a Contributor and losing Maintainer status. We hope that this document will help every contributor understand the growth path and make a greater contribution to the project's development. + +## Contributor Roles and Responsibilities + +we have two main contributor roles: Contributor and Maintainer. + +Here is a brief introduction to these two roles: + +1. Contributor: A contributor to the project who can contribute code, documentation, testing, and other resources. Contributors provide valuable resources to the project, helping it to continuously improve and develop. +2. Maintainer: A maintainer of the project who is responsible for the day-to-day maintenance of the project, including reviewing and merging PRs, handling issues, and releasing versions. Maintainers are key members of the project and have a significant impact on the project's development direction and decision-making. + +## How to become a Maintainer + +We welcome every contributor to contribute to the project's development and encourage contributors to upgrade to the role of Maintainer. + +The following are the conditions for upgrading from Contributor to Maintainer: + +1. Continuous contribution: Contributors need to contribute to the project continuously for a period of time (e.g., 3 months). This demonstrates the contributor's attention and enthusiasm for the project. +2. Quality assurance: The code or documentation submitted by contributors needs to maintain a high level of quality, meet the project's specifications, and have a positive impact on the project. +3. Active participation: Contributors need to actively participate in project discussions and decision-making, providing constructive opinions and suggestions for the project's development. +4. Team collaboration: Contributors need to have good teamwork skills, communicate friendly with other contributors and maintainers, and work together to solve problems. +5. 
Responsibility: Contributors need to have a certain sense of responsibility and be willing to undertake some of the project maintenance work, including reviewing PRs and handling issues. When a contributor meets the above conditions, existing maintainers will evaluate them. + +If they meet the requirements of Maintainer, they will be invited to become a new Maintainer. + +## Losing Maintainers status + +Maintainer have important responsibilities in the project, and we hope that every Maintainer can maintain their attention and enthusiasm for the project. + +However, we also understand that everyone's time and energy are limited, so when Maintainers cannot continue to fulfill their responsibilities, they will be downgraded to the role of Contributor: + +1. Long-term inactivity: If a Maintainer has not participated in project maintenance work, including reviewing PRs and handling issues, for a period of time (e.g., 3 months), they will be considered inactive. +2. Quality issues: If a Maintainer's work in the project has serious quality issues that affect the project's development, they will be considered not meeting the requirements of Maintainer. +3. Team collaboration issues: If a Maintainer has serious communication or teamwork issues with other contributors and maintainers, such as disrespecting others' opinions, frequent conflicts, or refusing to collaborate, which affects the project's normal operation and atmosphere, they will be considered not meeting the requirements of Maintainer. +4. Violation of rules: If a Maintainer violates the project's rules or code of conduct, including but not limited to leaking sensitive information or abusing privileges, they will be considered not meeting the requirements of Maintainer. +5. Voluntary application: If a Maintainer cannot continue to fulfill their responsibilities due to personal reasons, they can voluntarily apply to be downgraded to the role of Contributor. diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/_category_.json b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/_category_.json new file mode 100644 index 00000000..09eab23b --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Contribution Guide" +} diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/index.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/index.md new file mode 100644 index 00000000..a23e2100 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/1-contribution-guide/index.md @@ -0,0 +1,118 @@ +# Contributing Guide + +Contributing Guide that introduces how to participate and contribute to the community. + +To help us create a safe and positive community experience for all, we require all participants adhere to the CNCF Community [Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). + +## Before contributing + +### Find a Contribution Point + +You can contribute to Karpor in several ways including code and non-code contributions, +we appreciate every effort you contribute to the community. + +Here are some examples: + +* Contribute to the codebase and docs. +* Report and triage issues. +* Organize meetups and user groups in your local area. +* Help others by answering questions about Karpor. 
+ +And: + +- If you don’t know what issues start, we have prepared a [Community tasks | 新手任务清单 🎖︎](https://github.com/KusionStack/karpor/issues/463), or you can filter [help wanted](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) or [good first issue](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue++label%3A%22good+first+issue%22) label in issue tracker. you can choose the issue you like. +- If you have any questions, please [Submit the Issue](https://github.com/KusionStack/karpor/issues/new/choose) or [Post on the discussions](https://github.com/KusionStack/karpor/discussions/new/choose), we will answer as soon as possible. + +### How to Contribute Non-code + +We regard non-coding contribution as equally important with code contribution for the community's very existence and its future growth. + +- Refer to [Non-code Contribution Guide](./non-code-contribute) to know how you could help. + +### How to Contribute Code + +Unsure where to begin contributing to Karpor codebase? Start by browsing issues labeled `good first issue` or `help wanted`. + +- [Good first issue](https://github.com/KusionStack/karpor/labels/good%20first%20issue) issues are generally straightforward to complete. +- [Help wanted](https://github.com/KusionStack/karpor/labels/help%20wanted) issues are problems we would like the community to help us with regardless of complexity. +- Refer to [Code Contribution Guide](./code-contribute) for more details. + +Learn [Code Conventions](../conventions/code-conventions) and [Test Conventions](../conventions/test-conventions) and understand what to pay attention to when writing code. + +And learn the [Release Process And Cadence](../conventions/release-process) to know when your code changes will be released. + +## Contribute a Pull Request + +After opening or claiming an issue, you could contribute codes or non-codes to karpor by a pull request. Here are the steps you should follow: + +### Fork Repository + +Karpor adopts trunk-based development, i.e., the code used for release is maintained on the main branch. + +Thus, to develop karpor, you have to fork one project in [karpor](https://github.com/KusionStack/karpor) repository to your workspace, and then check out a new branch to develop coding. + +### Develop Code/Non-Code + +Now you can start writing to solve the issue. To maintain the quality of karpor, after submitting the PR, some necessary checks will be triggered. + +After the development is completed, commit and push to your forked repository. Since the PR Title will be used as a merging commit message, we ask your PR Title to meet the [Commit Conventions](../2-conventions/4-commit-conventions.md). + +Here are some simple explanations: + +PR Title should be organized following this structure: + +``` +[optional scope]: + +[optional body] +``` + +The required type helps better capture the area of the commit, based on the [Angular guidelines](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines). + +We use lowercase for `` to avoid spending time on case-sensitive issues. 
`<type>` can be one of the following: + +``` +feat: A new feature +fix: A bug fix +docs: Documentation only changes +build: Changes that affect the build system or external dependencies +style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) +refactor: A code change that neither fixes a bug nor adds a feature +perf: A code change that improves performance +test: Adding missing tests or correcting existing tests +chore: Changes to the build process or auxiliary tools and libraries such as documentation generation +``` + +### Open a Pull Request + +[Open a pull request](https://github.com/KusionStack/karpor/pulls) from the development branch of your forked repository to the main branch of Karpor. You should clearly describe what you did in the PR and link it to an issue. The PR title must also follow the commit conventions described above, must be 5-256 characters in length, and must not be prefixed with `WIP` or `[WIP]`. + +### Sign CLA + +If this is your first pull request, you need to sign our [CLA (Contributor License Agreement)](https://github.com/KusionStack/.github/blob/main/CLA.md). The only thing you need to do is post a pull request comment in exactly the following format: + +`I have read the CLA Document and I hereby sign the CLA` + +If your CLA signature check fails, the following may help: + +* The comment must be in the same format as above, with no extra spaces, line breaks, etc. +* The git committer must be the same person who created the Karpor PR. + +### PR Checks + +To keep the Karpor project reliable, the following checks will be triggered automatically: + +* Unit Test +* Golang Lint +* Commit Lint +* PR Title Lint +* License Lint +* Markdown Link Lint + +Please make sure your PR passes these checks. + +## Become a Community Member + +If you're interested in becoming a community member or learning more about the governance, please check the [Roles](./3-roles.md) page for details. + +Enjoy coding and collaborating in the Karpor world! diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/1-release-process.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/1-release-process.md new file mode 100644 index 00000000..6dda486a --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/1-release-process.md @@ -0,0 +1,49 @@ +--- +title: Release Process And Cadence +--- +## Release Planning + +We will establish and continuously follow up on the release plan through [GitHub Milestones](https://github.com/KusionStack/karpor/milestones). Each release milestone will include two types of tasks: + +- Tasks Maintainers commit to completing. Maintainers will decide on the features they are committed to implementing before the next release based on their available time and effort. Usually, tasks are finalized after offline discussions and then added to the milestone. These tasks will be assigned to the Maintainer who plans to implement or test them. +- Additional items contributed by community contributors, typically non-urgent features or optimizations. Maintainers do not commit to completing these issues within the release cycle but will commit to reviewing submissions from the community. + +The milestones will clearly describe the most important features and their expected completion dates. This will clearly inform end-users about the timing and contents of the next release. + +In addition to the next milestone, we will also maintain drafts of future release milestones.
+ +## Release Standards + +- All **official releases** should be tagged on the `main` branch, with optional pre-release version suffixes such as: `alpha`, `beta`, `rc`, for example, a regular official release version might be `v1.2.3`, `v1.2.3-alpha.0`. For instance, if we want to perform some validations before releasing the official version `v1.2.3`, we could first release a pre-release version like `v1.2.3-alpha.0`, followed by `v1.2.3` after the validation is complete. +- Maintainers commit to completing certain features and enhancements, tracking progress through [GitHub Milestones](https://github.com/KusionStack/karpor/milestones). +- We will do our best to avoid release delays; thus, if we cannot complete a feature on time, it will be moved to the next release. +- A new version will be released every **1 month**. + +## Release Standard Procedure + +Maintainers are responsible for driving the release process and following standard operating procedures to ensure the quality of the release. + +1. Tag the git commit designated for release and push it upstream; the tag needs to comply with [Semantic Versioning](#semantic-versioning). +2. Ensure that the triggered Github Actions pipeline is executed successfully. Once successful, it will automatically generate a new Github Release, which includes the Changelog calculated from commit messages, as well as artifacts such as images and tar.gz files. +3. Write clear release notes based on the **Github Release**, including: + - User-friendly release highlights. + - Deprecated and incompatible changes. + - Brief instructions on how to install and upgrade. + +## Gate Testing + +Before creating the release branch, we will have a **1-week** code freeze period. During this period, we will refrain from merging any feature PRs and will only fix bugs. + +Maintainers will test and fix these last-minute issues before each release. + +## Semantic Versioning + +`Karpor` adopts [Semantic Versioning](https://semver.org/) for its version numbers. + +The version format: `MAJOR.MINOR.PATCH`, for example, `v1.2.3`. The version number **incrementing rules** are as follows: + +- MAJOR version when you make incompatible API changes. +- MINOR version when you add functionality in a backwards-compatible manner. +- PATCH version when you make backwards-compatible bug fixes. + +**Pre-release version numbers and build metadata** can be added to the `MAJOR.MINOR.PATCH` as an extension, like `v1.2.3-alpha.0`, `v1.2.3-beta.1`, `v1.2.3-rc.2`, where `-alpha.0`, `-beta.1`, `-rc.2` are pre-release versions. diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/2-code-conventions.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/2-code-conventions.md new file mode 100644 index 00000000..0a523c67 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/2-code-conventions.md @@ -0,0 +1,94 @@ +--- +title: Code Conventions +--- +In this section, you will find the code conventions for all kinds of code Karpor project related. It's not necessary to learn all of them at once, but make sure you have read corresponding parts before you start to code. 
+ +- [Go Code Conventions](#go-code-conventions) +- [Bash or Script Conventions](#bash-or-script-conventions) +- [Directory and File Conventions](#directory-and-file-conventions) +- [Linting and Formatting](#linting-and-formatting) + +## Go Code Conventions + +- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +- [Effective Go](https://go.dev/doc/effective_go) +- Know and avoid [Go landmines](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) +- Comment your code. + + - [Go's commenting conventions](https://go.dev/blog/godoc) + - If reviewers ask questions about why the code is the way it is, that's a + sign that comments might be helpful. +- Command-line flags should use dashes, not underscores +- API + + - According to RFC3986, URLs are "case sensitive". Karpor uses `kebab-case` for API URLs. + - e.g.: `POST /rest-api/v1/resource-group-rule` +- Naming + + - Please consider package name when selecting an interface name, and avoid + redundancy. + + - e.g.: `storage.Interface` is better than `storage.StorageInterface`. + - Do not use uppercase characters, underscores, or dashes in package + names. + - Please consider parent directory name when choosing a package name. + + - so pkg/manager/cluster/foo.go should say `package cluster` + not `package clustermanager`. + - Unless there's a good reason, the `package foo` line should match + the name of the directory in which the .go file exists. + - Importers can use a different name if they need to disambiguate. + - Locks should be called `lock` and should never be embedded (always `lock sync.Mutex`). When multiple locks are present, give each lock a distinct name + following Go conventions - `stateLock`, `mapLock` etc. + +## Bash or Script Conventions + +- [https://google.github.io/styleguide/shell.xml](https://google.github.io/styleguide/shell.xml) +- Ensure that build, release, test, and cluster-management scripts run on + macOS + +## Directory and File Conventions + +- Avoid package sprawl. Find an appropriate subdirectory for new packages. + + - Libraries with no more appropriate home belong in new package + subdirectories of pkg/util +- Avoid general utility packages. Packages called "util" are suspect. Instead, + derive a name that describes your desired function. For example, the utility + functions dealing with waiting for operations are in the "wait" package and + include functionality like Poll. So the full name is wait.Poll +- All filenames should be lowercase +- Go source files and directories use underscores, not dashes + + - Package directories should generally avoid using separators as much as + possible (when packages are multiple words, they usually should be in nested + subdirectories). +- Document directories and filenames should use dashes rather than underscores +- Contrived examples that illustrate system features belong in + `/docs/user-guide` or `/docs/admin`, depending on whether it is a feature primarily + intended for users that deploy applications or cluster administrators, + respectively. Actual application examples belong in /examples. 
+ + - Examples should also illustrate [best practices for configuration and using the system](https://kubernetes.io/docs/concepts/configuration/overview/) +- Third-party code + + - Go code for normal third-party dependencies is managed using + [go modules](https://github.com/golang/go/wiki/Modules) + - Other third-party code belongs in `/third_party` + + - forked third party Go code goes in `/third_party/forked` + - forked _golang stdlib_ code goes in `/third_party/forked/golang` + - Third-party code must include licenses + - This includes modified third-party code and excerpts, as well + +## Linting and Formatting + +To ensure consistency across the Go codebase, we require all code to pass a number of linter checks. + +To run all linters, use the `lint` Makefile target: + +```shell +make lint +``` + +The command will clean code along with some lint checks. Please remember to check in all changes after that. diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/3-test-conventions.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/3-test-conventions.md new file mode 100644 index 00000000..9eb0e2e4 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/3-test-conventions.md @@ -0,0 +1,267 @@ +--- +title: Test Conventions +--- +## Testing Principles + +In Karpor, we primarily focus on the following three types of tests: + +- Unit tests: Tests targeting the **smallest testable units** (such as functions, methods, utility classes, etc.) +- Integration tests: Tests targeting the interaction and integration between **multiple units (or modules)** +- End-to-End tests (e2e tests): Tests targeting the **entire system's behavior**, usually requiring the simulation of real user scenarios + +Each has its strengths, weaknesses, and suitable scenarios. To achieve a better development experience, we should adhere to the following principles when writing tests. + +**Testing principles**: + +- A case should only cover one scenario +- Follow the **7-2-1 principle**, i.e., 70% unit tests, 20% integration tests, and 10% end-to-end tests +- **Avoid using Mock frameworks in unit tests unless necessary** (e.g., `golang/mock`). If you feel the need to use a Mock framework in unit tests, what you actually need might be integration tests or even end-to-end tests + +## Technology Selection + +At the current point in time, the most popular testing frameworks in the Go language ecosystem are [Ginkgo](https://onsi.github.io/ginkgo/)/[Gomega](https://onsi.github.io/gomega/) and [Testify](https://github.com/stretchr/testify). Therefore, this section mainly discusses the characteristics, pros and cons, and the final selection of these two testing frameworks. + +### Ginkgo/Gomega + +**Advantages**: + +1. **BDD Support**: Ginkgo is favored by many developers for its support of Behavior-Driven Development (BDD) style. It offers a rich DSL syntax, making test cases more descriptive and readable through keywords like `Describe`, `Context`, `It`, etc. +2. **Parallel Execution**: Ginkgo can execute tests in parallel across different processes, improving the efficiency of test execution. +3. **Rich Matchers**: Used in conjunction with the Gomega matchers library, it provides a wealth of assertion capabilities, making tests more intuitive and convenient. +4. **Asynchronous Support**: Ginkgo provides native support for handling complex asynchronous scenarios, reducing the risk of deadlocks and timeouts. +5. 
**Test Case Organization**: Supports organizing test cases into Suites for easy management and expansion. +6. **Documentation**: Ginkgo's [official documentation](http://onsi.github.io/ginkgo/) is very detailed, offering guides from beginner to advanced usage. + +**Disadvantages**: + +1. **Learning Curve:** For developers not familiar with BDD, Ginkgo's DSL syntax may take some time to get used to. +2. **Complexity in Parallel Testing:** Although Ginkgo supports parallel execution, managing resources and environment for parallel tests can introduce additional complexity in some cases. + +### Testify + +**Advantages**: + +1. **Simplified API**: Testify provides a simple and intuitive API, easy to get started with, especially for developers accustomed to the `testing` package. +2. **Mock Support**: Testify offers powerful Mock functionalities, facilitating the simulation of dependencies and interfaces for unit testing. +3. **Table-Driven Tests**: Supports table-driven testing, allowing for easy provision of various inputs and expected outputs for the same test function, enhancing test case reusability. +4. **Compatibility with `testing` Package**: Testify is highly compatible with the Go standard library's `testing` package, allowing for seamless integration into existing testing workflows. +5. Documentation: Testify's [official documentation](https://pkg.go.dev/github.com/stretchr/testify) also provides rich introductions on how to use its assertion and mocking functionalities. + +**Disadvantages**: + +1. **Lack of BDD Support**: Testify does not support the BDD style, potentially less intuitive for developers looking to improve test case readability. +2. **Relatively Simple Features**: Compared to Ginkgo, Testify's features are relatively simple and may not meet some complex testing scenarios' requirements. + +### Summary + +In short, Ginkgo/Gomega offers better readability and maintainability, producing clean and clear tests, but with a higher learning curve requiring familiarity with BDD style. Testify is simpler, more practical, with a lower learning curve, but as time progresses, the testing code style may become more varied, lowering maintainability. + +Considering the actual situation of Karpor and the pros and cons of both frameworks, we decide to use these two frameworks in combination: + +- Use Testify for unit testing, adhering to [Table-Driven Testing](https://go.dev/wiki/TableDrivenTests) to constrain the code style and prevent decay; +- Utilize Ginkgo's BDD features for writing higher-level integration and end-to-end tests; + +This combination fully leverages the strengths of both frameworks, improving the overall efficiency, readability, and quality of testing. + +## Writing Specifications + +### Test Style + +[Table-Driven Testing](https://go.dev/wiki/TableDrivenTests) is a best practice for writing test cases, akin to design patterns in programming, and it is also the style recommended by the official Go language. Table-Driven Testing uses tables to provide a variety of inputs and expected outputs, allowing the same test function to verify different scenarios. The advantages of this method are that it increases the reusability of test cases, reduces repetitive code, and makes tests clearer and easier to maintain. + +While there is no direct syntax support for Table-Driven Testing in Go's `testing` package, it can be emulated by writing helper functions and using anonymous functions. 
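+
+In general terms, the pattern is a slice of named cases driven through `t.Run` sub-tests. Below is a minimal sketch of that shape; the `Add` function, the package name, and the case data are hypothetical and only illustrate the structure:
+
+```go
+package calc
+
+import "testing"
+
+// Add is a trivial, hypothetical function under test.
+func Add(a, b int) int { return a + b }
+
+func TestAdd(t *testing.T) {
+	// The "table": each entry is one scenario with a name, the inputs, and the expected output.
+	tests := []struct {
+		name string
+		a, b int
+		want int
+	}{
+		{"both positive", 1, 2, 3},
+		{"with zero", 0, 5, 5},
+		{"negative operand", -1, 1, 0},
+	}
+	for _, tt := range tests {
+		// t.Run creates an independent sub-test per entry, so a failing case
+		// is reported by name without hiding the results of the others.
+		t.Run(tt.name, func(t *testing.T) {
+			if got := Add(tt.a, tt.b); got != tt.want {
+				t.Errorf("Add(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want)
+			}
+		})
+	}
+}
+```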
+ +Here is an example of Table-Driven Testing implemented in the Go standard library's `fmt` package: + +```go +var flagtests = []struct { + in string + out string +}{ + {"%a", "[%a]"}, + {"%-a", "[%-a]"}, + {"%+a", "[%+a]"}, + {"%#a", "[%#a]"}, + {"% a", "[% a]"}, + {"%0a", "[%0a]"}, + {"%1.2a", "[%1.2a]"}, + {"%-1.2a", "[%-1.2a]"}, + {"%+1.2a", "[%+1.2a]"}, + {"%-+1.2a", "[%+-1.2a]"}, + {"%-+1.2abc", "[%+-1.2a]bc"}, + {"%-1.2abc", "[%-1.2a]bc"}, +} +func TestFlagParser(t *testing.T) { + var flagprinter flagPrinter + for _, tt := range flagtests { + t.Run(tt.in, func(t *testing.T) { + s := Sprintf(tt.in, &flagprinter) + if s != tt.out { + t.Errorf("got %q, want %q", s, tt.out) + } + }) + } +} +``` + +It is worth noting that most mainstream IDEs have already integrated [gotests](https://github.com/cweill/gotests), enabling the automatic generation of table-driven style Go unit tests, which I believe can enhance the efficiency of writing your unit tests: + +- [GoLand](https://blog.jetbrains.com/go/2020/03/13/test-driven-development-with-goland/) +- [Visual Studio Code](https://juandes.com/go-test-vsc/) + +### File Naming + +- **Specification Content**:Test files should end with `_test.go` to distinguish between test code and production code. +- **Positive Example**:`xxx_test.go` +- **Negative Example**:`testFile.go`、`test_xxx.go` + +### Test Function Naming + +- **Specification**: The name of the test function should start with `Test`, followed by the name of the function being tested, using camel case notation. +- **Positive Example**: + ```go + func TestAdd(t *testing.T) { + // Test logic ... + } + ``` +- **Negative Example**: + ```go + func TestAddWrong(t *testing.T) { + // Test logic ... + } + ``` + +### Test Function Signature + +- **Specification Content**: The signature of the test function should be `func TestXxx(t *testing.T)`, where `t` is the test object, of type `*testing.T`, and there should be no other parameters or return values. +- **Positive Example**: + ```go + func TestSubtraction(t *testing.T) { + // Test logic ... + } + ``` +- **Negative Example**: + ```go + func TestSubtraction(value int) { + // Test logic ... + } + ``` + +### Test Organization + +- **Specification Content**:Test cases should be independent of each other to avoid mutual influence between tests; use sub-tests (`t.Run`) to organize complex test scenarios. +- **Positive Example**: + ```go + func TestMathOperations(t *testing.T) { + t.Run("Addition", func(t *testing.T) { + // Test addition logic ... + }) + t.Run("Subtraction", func(t *testing.T) { + // Test subtraction logic ... + }) + } + ``` +- **Negative Example**: + ```go + func TestMathOperations(t *testing.T) { + // Mixed addition and subtraction logic... + } + ``` + +### Test Coverage + +- **Specification Content**:Attention should be paid to test coverage, use the `go test -cover` command to examine the test coverage of the code. +- **Positive Example**: + + ```shell + $ go test -cover + ``` +- **Negative Example**: + + ```shell + $ go test # Without checking test coverage + ``` +- **Note**: Karpor has wrapped this command as `make cover`, which will output the coverage for each package and total coverage in the command line. If you would like to view the coverage report in the browser, please execute `make cover-html`. + +### Benchmark Tests + +- **Specification Content**:Benchmark test functions should start with `Benchmark` and accept an argument of type `*testing.B`, focusing on performance testing. 
+- **Positive Example**: + ```go + func BenchmarkAdd(b *testing.B) { + for i := 0; i < b.N; i++ { + add(1, 1) + } + } + ``` +- **Negative Example**: + ```go + func BenchmarkAddWrong(b *testing.B) { + for i := 0; i < 1000; i++ { + add(1, 1) + } + } + ``` + +### Concurrency Testing + +- **Specification Content**:For concurrent code, appropriate test cases should be written to ensure the correctness of the concurrency logic. +- **Positive Example**: + ```go + func TestConcurrentAccess(t *testing.T) { + // Set up concurrent environment ... + // Test logic for concurrent access ... + } + ``` +- **Negative Example**: + ```go + func TestConcurrentAccess(t *testing.T) { + // Only test single-thread logic... + } + ``` + +### Test Helper Functions + +- **Specification Content**:Helper functions can be defined within the test files to help set up the test environment or clean up resources. +- **Positive Example**: + ```go + func setupTest(t *testing.T) { + // Set up test environment ... + } + + func tearDownTest(t *testing.T) { + // Clean up resources ... + } + + func TestMyFunction(t *testing.T) { + t.Run("TestSetup", func(t *testing.T) { + setupTest(t) + // Test logic ... + }) + } + ``` +- **Negative Example**: + ```go + // Directly setting up and cleaning up resources in the test + func TestMyFunction(t *testing.T) { + // Set up test environment... + // Test logic... + // Clean up resources... + } + ``` + +### Avoid Using Global Variables + +- **Specification Content**: Try to avoid using global variables in tests to ensure test independence. +- **Positive Example**: Declare and use the necessary variables inside the test function. +- **Negative Example**: Declare global variables at the top of the test file. + +### Clear Error Messages + +- **Specification Content**: When a test fails, output clear and understandable error messages to help developers locate the problem. +- **Positive Example**: + - `t.Errorf("Expected value %d, but got %d", expected, real)` +- **Negative Example**: + - `t.Errorf("Error occurred")` + - `fmt.Println("Error occurred")` + - `panic("Error occurred")` diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/4-commit-conventions.md b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/4-commit-conventions.md new file mode 100644 index 00000000..fd3980e3 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/4-commit-conventions.md @@ -0,0 +1,71 @@ +--- +title: Commit Conventions +--- +## Commit Message Structure + +Karpor adheres to [conventional-commits](https://www.conventionalcommits.org/en/v1.0.0/). + +Commit messages should be organized following this structure: + +``` +[optional scope]: + +[optional body] +``` + +## Example + +Commit message with scope: + +``` +feat(lang): add polish language +``` + +Commit message without body: + +``` +docs: correct spelling of CHANGELOG +``` + +Commit message with multiple body paragraphs:: + +``` +fix: correct minor typos in code + +see the issue for details + +on typos fixed. + +reviewed-by: Z +refs #133 +``` + +## ``(Required) + +The required type helps better capture the area of the commit, based on the [Angular guidelines](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines). + +We use lowercase for `` to avoid spending time on case-sensitive issues. 
`` can be one of the following: + +- **feat**: A new feature +- **fix**: A bug fix +- **docs**: Documentation only changes +- **build**: Changes that affect the build system or external dependencies +- **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) +- **refactor**: A code change that neither fixes a bug nor adds a feature +- **perf**: A code change that improves performance +- **test**: Adding missing tests or correcting existing tests +- **chore**: Changes to the build process or auxiliary tools and libraries such as documentation generation + +## ``(Optional) + +Scope is optional and can be provided to the type of commit to provide additional contextual information, enclosed in parentheses. It can be anything specifying the place of the commit change. Github issue links are also valid scopes e.g., fix(ui), feat(api), fix(#233), etc. + +When the change affects multiple scopes, `*` can be used. + +## ``(Required) + +The subject must come immediately after the type/scope prefix, followed by a colon and space. It is a concise summary of the code changes, for example, "fix: array parsing issue when multiple spaces were contained in string", rather than "fix: bug". + +## ``(Required) + +A longer commit body can be provided after the brief subject, giving additional context information about the code change. The body must begin one line after the description. diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/_category_.json b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/_category_.json new file mode 100644 index 00000000..3287fa06 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/2-conventions/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Conventions" +} diff --git a/karpor_versioned_docs/version-v0.4/4-developer-guide/_category_.json b/karpor_versioned_docs/version-v0.4/4-developer-guide/_category_.json new file mode 100644 index 00000000..8de262b6 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/4-developer-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Developer Guide" +} diff --git a/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/1-karpor.md b/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/1-karpor.md new file mode 100644 index 00000000..891809d7 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/1-karpor.md @@ -0,0 +1,230 @@ +--- +title: karpor +--- +### Synopsis + +Launch an API server + +``` +karpor [flags] +``` + +### Options + +``` + --admission-control-config-file string File with admission control configuration. + --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. + --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true) + --api-audiences strings Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. 
If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. + --audit-log-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-log-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 1) + --audit-log-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. + --audit-log-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. + --audit-log-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. + --audit-log-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. + --audit-log-compress If set, the rotated log files will be compressed using gzip. + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "blocking") + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-log-truncate-enabled Whether event and batch truncating is enabled. + --audit-log-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-log-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-log-version string API group and version used for serializing audit events written to log. (default "audit.k8s.io/v1") + --audit-policy-file string Path to the file that defines the audit policy configuration. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-webhook-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. 
(default true) + --audit-webhook-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. + --audit-webhook-initial-backoff duration The amount of time to wait before retrying the first failed request. (default 10s) + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "batch") + --audit-webhook-truncate-enabled Whether event and batch truncating is enabled. + --audit-webhook-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-webhook-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-webhook-version string API group and version used for serializing audit events written to webhook. (default "audit.k8s.io/v1") + --authorization-mode strings Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default [RBAC]) + --authorization-policy-file string File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --authorization-webhook-version string The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook. (default "v1beta1") + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. 
(default [.*]) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. (default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook,ValidatingAdmissionPolicy]) + --egress-selector-config-file string File with apiserver egress selector configuration. + --elastic-search-addresses strings The elastic search address + --elastic-search-password string The elastic search password + --elastic-search-username string The elastic search username + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-priority-and-fairness If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness (default true) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --encryption-provider-config-automatic-reload Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints. + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-count-metric-poll-period duration Frequency of polling etcd for number of resources per type. 0 disables the metric collection. (default 1m0s) + --etcd-db-metric-poll-interval duration The interval of requests to poll etcd and update metric. 0 disables the metric collection (default 30s) + --etcd-healthcheck-timeout duration The timeout to use when checking etcd health. (default 2s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry/karpor") + --etcd-readycheck-timeout duration The timeout to use when checking etcd readiness (default 2s) + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary. 
+ --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: + APIListChunking=true|false (BETA - default=true) + APIPriorityAndFairness=true|false (BETA - default=true) + APIResponseCompression=true|false (BETA - default=true) + APISelfSubjectReview=true|false (ALPHA - default=false) + APIServerIdentity=true|false (BETA - default=true) + APIServerTracing=true|false (ALPHA - default=false) + AggregatedDiscoveryEndpoint=true|false (ALPHA - default=false) + AllAlpha=true|false (ALPHA - default=false) + AllBeta=true|false (BETA - default=false) + AnyVolumeDataSource=true|false (BETA - default=true) + AppArmor=true|false (BETA - default=true) + CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + CPUManagerPolicyBetaOptions=true|false (BETA - default=true) + CPUManagerPolicyOptions=true|false (BETA - default=true) + CSIMigrationPortworx=true|false (BETA - default=false) + CSIMigrationRBD=true|false (ALPHA - default=false) + CSINodeExpandSecret=true|false (ALPHA - default=false) + CSIVolumeHealth=true|false (ALPHA - default=false) + ComponentSLIs=true|false (ALPHA - default=false) + ContainerCheckpoint=true|false (ALPHA - default=false) + CronJobTimeZone=true|false (BETA - default=true) + CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false) + CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false) + CustomResourceValidationExpressions=true|false (BETA - default=true) + DisableCloudProviders=true|false (ALPHA - default=false) + DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false) + DownwardAPIHugePages=true|false (BETA - default=true) + DynamicResourceAllocation=true|false (ALPHA - default=false) + EventedPLEG=true|false (ALPHA - default=false) + ExpandedDNSConfig=true|false (BETA - default=true) + ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) + GRPCContainerProbe=true|false (BETA - default=true) + GracefulNodeShutdown=true|false (BETA - default=true) + GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true) + HPAContainerMetrics=true|false (ALPHA - default=false) + HPAScaleToZero=true|false (ALPHA - default=false) + HonorPVReclaimPolicy=true|false (ALPHA - default=false) + IPTablesOwnershipCleanup=true|false (ALPHA - default=false) + InTreePluginAWSUnregister=true|false (ALPHA - default=false) + InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false) + InTreePluginAzureFileUnregister=true|false (ALPHA - default=false) + InTreePluginGCEUnregister=true|false (ALPHA - default=false) + InTreePluginOpenStackUnregister=true|false (ALPHA - default=false) + InTreePluginPortworxUnregister=true|false (ALPHA - default=false) + InTreePluginRBDUnregister=true|false (ALPHA - default=false) + InTreePluginvSphereUnregister=true|false (ALPHA - default=false) + JobMutableNodeSchedulingDirectives=true|false (BETA - default=true) + JobPodFailurePolicy=true|false (BETA - default=true) + JobReadyPods=true|false (BETA - default=true) + KMSv2=true|false (ALPHA - default=false) + KubeletInUserNamespace=true|false (ALPHA - default=false) + KubeletPodResources=true|false (BETA - default=true) + KubeletPodResourcesGetAllocatable=true|false (BETA - default=true) + KubeletTracing=true|false (ALPHA - default=false) + LegacyServiceAccountTokenTracking=true|false (ALPHA - default=false) + 
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) + LogarithmicScaleDown=true|false (BETA - default=true) + MatchLabelKeysInPodTopologySpread=true|false (ALPHA - default=false) + MaxUnavailableStatefulSet=true|false (ALPHA - default=false) + MemoryManager=true|false (BETA - default=true) + MemoryQoS=true|false (ALPHA - default=false) + MinDomainsInPodTopologySpread=true|false (BETA - default=false) + MinimizeIPTablesRestore=true|false (ALPHA - default=false) + MultiCIDRRangeAllocator=true|false (ALPHA - default=false) + NetworkPolicyStatus=true|false (ALPHA - default=false) + NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true) + NodeOutOfServiceVolumeDetach=true|false (BETA - default=true) + NodeSwap=true|false (ALPHA - default=false) + OpenAPIEnums=true|false (BETA - default=true) + OpenAPIV3=true|false (BETA - default=true) + PDBUnhealthyPodEvictionPolicy=true|false (ALPHA - default=false) + PodAndContainerStatsFromCRI=true|false (ALPHA - default=false) + PodDeletionCost=true|false (BETA - default=true) + PodDisruptionConditions=true|false (BETA - default=true) + PodHasNetworkCondition=true|false (ALPHA - default=false) + PodSchedulingReadiness=true|false (ALPHA - default=false) + ProbeTerminationGracePeriod=true|false (BETA - default=true) + ProcMountType=true|false (ALPHA - default=false) + ProxyTerminatingEndpoints=true|false (BETA - default=true) + QOSReserved=true|false (ALPHA - default=false) + ReadWriteOncePod=true|false (ALPHA - default=false) + RecoverVolumeExpansionFailure=true|false (ALPHA - default=false) + RemainingItemCount=true|false (BETA - default=true) + RetroactiveDefaultStorageClass=true|false (BETA - default=true) + RotateKubeletServerCertificate=true|false (BETA - default=true) + SELinuxMountReadWriteOncePod=true|false (ALPHA - default=false) + SeccompDefault=true|false (BETA - default=true) + ServerSideFieldValidation=true|false (BETA - default=true) + SizeMemoryBackedVolumes=true|false (BETA - default=true) + StatefulSetAutoDeletePVC=true|false (ALPHA - default=false) + StatefulSetStartOrdinal=true|false (ALPHA - default=false) + StorageVersionAPI=true|false (ALPHA - default=false) + StorageVersionHash=true|false (BETA - default=true) + TopologyAwareHints=true|false (BETA - default=true) + TopologyManager=true|false (BETA - default=true) + TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + TopologyManagerPolicyBetaOptions=true|false (BETA - default=false) + TopologyManagerPolicyOptions=true|false (ALPHA - default=false) + UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false) + ValidatingAdmissionPolicy=true|false (ALPHA - default=false) + VolumeCapacityPriority=true|false (ALPHA - default=false) + WinDSR=true|false (ALPHA - default=false) + WinOverlay=true|false (BETA - default=true) + WindowsHostNetwork=true|false (ALPHA - default=true) (default APIPriorityAndFairness=true) + --goaway-chance float To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. 
+ -h, --help help for karpor + --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default. (default 1000) + --lease-reuse-duration-seconds int The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer. (default 60) + --livez-grace-period duration This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true. + --max-mutating-requests-inflight int This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely. (default 200) + --max-requests-inflight int This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false] + --permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false] + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --read-only-mode turn on the read only mode + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests. + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. 
X-Remote-User is common. + --search-storage-type string The search storage type + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 443) + --shutdown-delay-duration duration Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server. + --shutdown-send-retry-after If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle. + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default). + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. Supported media types: [application/json, application/yaml, application/vnd.kubernetes.protobuf] (default "application/json") + --strict-transport-security-directives strings List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload' + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. (default "apiserver.local.config/certificates/apiserver.crt") + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. + Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. + Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. + --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13 + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. 
(default "apiserver.local.config/certificates/apiserver.key") + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --tracing-config-file string File with apiserver tracing configuration. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. This option is only meaningful for resources built into the apiserver, not ones defined by CRDs or aggregated from external servers, and is only consulted if the watch-cache is enabled. The only meaningful size setting to supply here is zero, which means to disable watch caching for the associated resource; all non-zero values are equivalent and mean to not disable watch caching for that resource +``` + +### SEE ALSO + +* [karpor syncer](2-karpor-syncer.md) - start a resource syncer to sync resource from clusters + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/2-karpor-syncer.md b/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/2-karpor-syncer.md new file mode 100644 index 00000000..d25245ae --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/2-karpor-syncer.md @@ -0,0 +1,25 @@ +--- +title: karpor syncer +--- +## karpor syncer + +start a resource syncer to sync resource from clusters + +``` +karpor syncer [flags] +``` + +### Options + +``` + --elastic-search-addresses strings The elastic search address. + --health-probe-bind-address string The address the probe endpoint binds to. (default ":8081") + -h, --help help for syncer + --metrics-bind-address string The address the metric endpoint binds to. 
(default ":8080") +``` + +### SEE ALSO + +* [karpor](1-karpor.md) - Launch an API server + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/_category_.json b/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/_category_.json new file mode 100644 index 00000000..41757f5f --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/5-references/1-cli-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "CLI Commands" +} diff --git a/karpor_versioned_docs/version-v0.4/5-references/2-openapi.md b/karpor_versioned_docs/version-v0.4/5-references/2-openapi.md new file mode 100644 index 00000000..81c0321d --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/5-references/2-openapi.md @@ -0,0 +1,1862 @@ +--- +title: OpenAPI +--- +## Informations + +### Version + +1.0 + +### Contact + +## Content negotiation + +### URI Schemes + +* http + +### Consumes + +* application/json +* multipart/form-data +* text/plain + +### Produces + +* application/json +* text/plain + +## All endpoints + +### cluster + +| Method | URI | Name | Summary | +| ------ | ------------------------------------ | ------------------------------------------------------------------------------------- | -------------------------------------------- | +| DELETE | /rest-api/v1/cluster/{clusterName} | [delete rest API v1 cluster cluster name](#delete-rest-api-v1-cluster-cluster-name) | Delete removes a cluster resource by name. | +| GET | /rest-api/v1/cluster/{clusterName} | [get rest API v1 cluster cluster name](#get-rest-api-v1-cluster-cluster-name) | Get returns a cluster resource by name. | +| GET | /rest-api/v1/clusters | [get rest API v1 clusters](#get-rest-api-v1-clusters) | List lists all cluster resources. | +| POST | /rest-api/v1/cluster/{clusterName} | [post rest API v1 cluster cluster name](#post-rest-api-v1-cluster-cluster-name) | Create creates a cluster resource. | +| POST | /rest-api/v1/cluster/config/file | [post rest API v1 cluster config file](#post-rest-api-v1-cluster-config-file) | Upload kubeConfig file for cluster | +| POST | /rest-api/v1/cluster/config/validate | [post rest API v1 cluster config validate](#post-rest-api-v1-cluster-config-validate) | Validate KubeConfig | +| PUT | /rest-api/v1/cluster/{clusterName} | [put rest API v1 cluster cluster name](#put-rest-api-v1-cluster-cluster-name) | Update updates the cluster metadata by name. | + +### debug + +| Method | URI | Name | Summary | +| ------ | ---------- | ------------------------------- | ---------------------------- | +| GET | /endpoints | [get endpoints](#get-endpoints) | List all available endpoints | + +### insight + +| Method | URI | Name | Summary | +| ------ | ----------------------------- | --------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/insight/audit | [get rest API v1 insight audit](#get-rest-api-v1-insight-audit) | Audit based on resource group. | +| GET | /rest-api/v1/insight/detail | [get rest API v1 insight detail](#get-rest-api-v1-insight-detail) | GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/events | [get rest API v1 insight events](#get-rest-api-v1-insight-events) | GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. 
| +| GET | /rest-api/v1/insight/score | [get rest API v1 insight score](#get-rest-api-v1-insight-score) | ScoreHandler calculates a score for the audited manifest. | +| GET | /rest-api/v1/insight/stats | [get rest API v1 insight stats](#get-rest-api-v1-insight-stats) | Get returns a global statistics info. | +| GET | /rest-api/v1/insight/summary | [get rest API v1 insight summary](#get-rest-api-v1-insight-summary) | Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/topology | [get rest API v1 insight topology](#get-rest-api-v1-insight-topology) | GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | + +### resourcegroup + +| Method | URI | Name | Summary | +| ------ | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | +| GET | /rest-api/v1/resource-groups/{resourceGroupRuleName} | [get rest API v1 resource groups resource group rule name](#get-rest-api-v1-resource-groups-resource-group-rule-name) | List lists all ResourceGroups by rule name. | + +### resourcegrouprule + +| Method | URI | Name | Summary | +| ------ | -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | +| DELETE | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [delete rest API v1 resource group rule resource group rule name](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name) | Delete removes a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [get rest API v1 resource group rule resource group rule name](#get-rest-api-v1-resource-group-rule-resource-group-rule-name) | Get returns a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rules | [get rest API v1 resource group rules](#get-rest-api-v1-resource-group-rules) | List lists all ResourceGroupRules. | +| POST | /rest-api/v1/resource-group-rule | [post rest API v1 resource group rule](#post-rest-api-v1-resource-group-rule) | Create creates a ResourceGroupRule. | +| PUT | /rest-api/v1/resource-group-rule | [put rest API v1 resource group rule](#put-rest-api-v1-resource-group-rule) | Update updates the ResourceGroupRule metadata by name. | + +### search + +| Method | URI | Name | Summary | +| ------ | ------------------- | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/search | [get rest API v1 search](#get-rest-api-v1-search) | SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. | + +## Paths + +### Delete removes a cluster resource by name. (*DeleteRestAPIV1ClusterClusterName*) + +``` +DELETE /rest-api/v1/cluster/{clusterName} +``` + +This endpoint deletes the cluster resource by name. 
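+
+A request sketch follows; `$KARPOR_SERVER` is a placeholder for wherever the Karpor API server is reachable (it is not defined by this spec), and any authentication headers are omitted:
+
+```shell
+# Delete the cluster resource named "example-cluster"; the response reports the operation status.
+curl -X DELETE "$KARPOR_SERVER/rest-api/v1/cluster/example-cluster"
+```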
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------- | +| [200](#delete-rest-api-v1-cluster-cluster-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#delete-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#delete-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#delete-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#delete-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#delete-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#delete-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Delete removes a ResourceGroupRule by name. (*DeleteRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +DELETE /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint deletes the ResourceGroupRule by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------------------------------- | +| [200](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List all available endpoints (*GetEndpoints*) + +``` +GET /endpoints +``` + +List all registered endpoints in the router + +#### Consumes + +* text/plain + +#### Produces + +* text/plain + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------- | ------ | ----------------------------- | :---------: | ----------------------------------- | +| [200](#get-endpoints-200) | OK | Endpoints listed successfully | | [schema](#get-endpoints-200-schema) | + +#### Responses + +##### 200 - Endpoints listed successfully + +Status: OK + +###### Schema + +### Get returns a cluster resource by name. (*GetRestAPIV1ClusterClusterName*) + +``` +GET /rest-api/v1/cluster/{clusterName} +``` + +This endpoint returns a cluster resource by name. 
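+
+For example, using the `$KARPOR_SERVER` placeholder introduced above:
+
+```shell
+# Fetch the cluster resource named "example-cluster" rendered as YAML.
+curl "$KARPOR_SERVER/rest-api/v1/cluster/example-cluster?format=yaml"
+```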
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------- | ------ | -------- | --------- | :------: | ------- | -------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#get-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#get-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#get-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#get-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#get-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#get-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all cluster resources. (*GetRestAPIV1Clusters*) + +``` +GET /rest-api/v1/clusters +``` + +This endpoint lists all cluster resources. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the cluster. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------ | --------------------- | ----------------------- | :---------: | ---------------------------------------------- | +| [200](#get-rest-api-v1-clusters-200) | OK | List of cluster objects | | [schema](#get-rest-api-v1-clusters-200-schema) | +| [400](#get-rest-api-v1-clusters-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-clusters-400-schema) | +| [401](#get-rest-api-v1-clusters-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-clusters-401-schema) | +| [404](#get-rest-api-v1-clusters-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-clusters-404-schema) | +| [405](#get-rest-api-v1-clusters-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-clusters-405-schema) | +| [429](#get-rest-api-v1-clusters-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-clusters-429-schema) | +| [500](#get-rest-api-v1-clusters-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-clusters-500-schema) | + +#### Responses + +##### 200 - List of cluster objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Audit based on resource group. (*GetRestAPIV1InsightAudit*) + +``` +GET /rest-api/v1/insight/audit +``` + +This endpoint audits based on the specified resource group. 
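+
+A sketch of an audit request for a single workload (all parameter values are illustrative):
+
+```shell
+# Audit the Deployment "foo" in namespace "default" of cluster "example-cluster".
+curl "$KARPOR_SERVER/rest-api/v1/insight/audit?cluster=example-cluster&apiVersion=apps%2Fv1&kind=Deployment&namespace=default&name=foo"
+```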
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced scanning, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-audit-200) | OK | Audit results | | [schema](#get-rest-api-v1-insight-audit-200-schema) | +| [400](#get-rest-api-v1-insight-audit-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-audit-400-schema) | +| [401](#get-rest-api-v1-insight-audit-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-audit-401-schema) | +| [404](#get-rest-api-v1-insight-audit-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-audit-404-schema) | +| [429](#get-rest-api-v1-insight-audit-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-audit-429-schema) | +| [500](#get-rest-api-v1-insight-audit-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-audit-500-schema) | + +#### Responses + +##### 200 - Audit results + +Status: OK + +###### Schema + +[ScannerAuditData](#scanner-audit-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightDetail*) + +``` +GET /rest-api/v1/insight/detail +``` + +This endpoint returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml. 
Default to json | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-detail-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-insight-detail-200-schema) | +| [400](#get-rest-api-v1-insight-detail-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-detail-400-schema) | +| [401](#get-rest-api-v1-insight-detail-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-detail-401-schema) | +| [404](#get-rest-api-v1-insight-detail-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-detail-404-schema) | +| [405](#get-rest-api-v1-insight-detail-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-detail-405-schema) | +| [429](#get-rest-api-v1-insight-detail-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-detail-429-schema) | +| [500](#get-rest-api-v1-insight-detail-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-detail-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightEvents*) + +``` +GET /rest-api/v1/insight/events +``` + +This endpoint returns events for a Kubernetes resource YAML by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-events-200) | OK | List of events | | [schema](#get-rest-api-v1-insight-events-200-schema) | +| [400](#get-rest-api-v1-insight-events-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-events-400-schema) | +| [401](#get-rest-api-v1-insight-events-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-events-401-schema) | +| [404](#get-rest-api-v1-insight-events-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-events-404-schema) | +| [405](#get-rest-api-v1-insight-events-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-events-405-schema) | +| [429](#get-rest-api-v1-insight-events-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-events-429-schema) | +| [500](#get-rest-api-v1-insight-events-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-events-500-schema) | + +#### Responses + +##### 200 - List of events + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### ScoreHandler calculates a score for the audited manifest. (*GetRestAPIV1InsightScore*) + +``` +GET /rest-api/v1/insight/score +``` + +This endpoint calculates a score for the provided manifest based on the number and severity of issues detected during the audit. 
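+
+For example, scoring the same illustrative resource group used above:
+
+```shell
+# Compute the audit score for the Deployment "foo"; add forceNew=true to force recomputation.
+curl "$KARPOR_SERVER/rest-api/v1/insight/score?cluster=example-cluster&apiVersion=apps%2Fv1&kind=Deployment&namespace=default&name=foo"
+```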
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced compute score, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ------------------------ | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-score-200) | OK | Score calculation result | | [schema](#get-rest-api-v1-insight-score-200-schema) | +| [400](#get-rest-api-v1-insight-score-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-score-400-schema) | +| [401](#get-rest-api-v1-insight-score-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-score-401-schema) | +| [404](#get-rest-api-v1-insight-score-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-score-404-schema) | +| [429](#get-rest-api-v1-insight-score-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-score-429-schema) | +| [500](#get-rest-api-v1-insight-score-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-score-500-schema) | + +#### Responses + +##### 200 - Score calculation result + +Status: OK + +###### Schema + +[InsightScoreData](#insight-score-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a global statistics info. (*GetRestAPIV1InsightStats*) + +``` +GET /rest-api/v1/insight/stats +``` + +This endpoint returns a global statistics info. 
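+
+For example:
+
+```shell
+# Retrieve the global statistics info.
+curl "$KARPOR_SERVER/rest-api/v1/insight/stats"
+```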
+ +#### Produces + +* application/json + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ---------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-stats-200) | OK | Global statistics info | | [schema](#get-rest-api-v1-insight-stats-200-schema) | +| [400](#get-rest-api-v1-insight-stats-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-stats-400-schema) | +| [401](#get-rest-api-v1-insight-stats-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-stats-401-schema) | +| [404](#get-rest-api-v1-insight-stats-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-stats-404-schema) | +| [405](#get-rest-api-v1-insight-stats-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-stats-405-schema) | +| [429](#get-rest-api-v1-insight-stats-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-stats-429-schema) | +| [500](#get-rest-api-v1-insight-stats-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-stats-500-schema) | + +#### Responses + +##### 200 - Global statistics info + +Status: OK + +###### Schema + +[InsightStatistics](#insight-statistics) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightSummary*) + +``` +GET /rest-api/v1/insight/summary +``` + +This endpoint returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------- | +| [200](#get-rest-api-v1-insight-summary-200) | OK | Resource Summary | | [schema](#get-rest-api-v1-insight-summary-200-schema) | +| [400](#get-rest-api-v1-insight-summary-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-summary-400-schema) | +| [401](#get-rest-api-v1-insight-summary-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-summary-401-schema) | +| [404](#get-rest-api-v1-insight-summary-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-summary-404-schema) | +| [405](#get-rest-api-v1-insight-summary-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-summary-405-schema) | +| [429](#get-rest-api-v1-insight-summary-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-summary-429-schema) | +| [500](#get-rest-api-v1-insight-summary-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-summary-500-schema) | + +#### Responses + +##### 200 - Resource Summary + +Status: OK + +###### Schema + +[InsightResourceSummary](#insight-resource-summary) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightTopology*) + +``` +GET /rest-api/v1/insight/topology +``` + +This endpoint returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Force re-generating the topology, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------- | --------------------- | -------------------------------------------- | :---------: | ------------------------------------------------------ | +| [200](#get-rest-api-v1-insight-topology-200) | OK | map from string to resource.ResourceTopology | | [schema](#get-rest-api-v1-insight-topology-200-schema) | +| [400](#get-rest-api-v1-insight-topology-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-topology-400-schema) | +| [401](#get-rest-api-v1-insight-topology-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-topology-401-schema) | +| [404](#get-rest-api-v1-insight-topology-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-topology-404-schema) | +| [405](#get-rest-api-v1-insight-topology-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-topology-405-schema) | +| [429](#get-rest-api-v1-insight-topology-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-topology-429-schema) | +| [500](#get-rest-api-v1-insight-topology-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-topology-500-schema) | + +#### Responses + +##### 200 - map from string to resource.ResourceTopology + +Status: OK + +###### Schema + +map of [InsightResourceTopology](#insight-resource-topology) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a ResourceGroupRule by name. (*GetRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint returns a ResourceGroupRule by name. 
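+
+For example, with a hypothetical rule named "example-rule":
+
+```shell
+# Fetch the ResourceGroupRule named "example-rule".
+curl "$KARPOR_SERVER/rest-api/v1/resource-group-rule/example-rule"
+```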
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroupRules. (*GetRestAPIV1ResourceGroupRules*) + +``` +GET /rest-api/v1/resource-group-rules +``` + +This endpoint lists all ResourceGroupRules. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the resourceGroupRule. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rules-200) | OK | List of resourceGroupRule objects | | [schema](#get-rest-api-v1-resource-group-rules-200-schema) | +| [400](#get-rest-api-v1-resource-group-rules-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rules-400-schema) | +| [401](#get-rest-api-v1-resource-group-rules-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rules-401-schema) | +| [404](#get-rest-api-v1-resource-group-rules-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rules-404-schema) | +| [405](#get-rest-api-v1-resource-group-rules-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rules-405-schema) | +| [429](#get-rest-api-v1-resource-group-rules-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rules-429-schema) | +| [500](#get-rest-api-v1-resource-group-rules-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rules-500-schema) | + +#### Responses + +##### 200 - List of resourceGroupRule objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroups by rule name. (*GetRestAPIV1ResourceGroupsResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-groups/{resourceGroupRuleName} +``` + +This endpoint lists all ResourceGroups. 
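+
+For example, listing the groups produced by the hypothetical rule "example-rule":
+
+```shell
+# List all ResourceGroups generated by the rule "example-rule".
+curl "$KARPOR_SERVER/rest-api/v1/resource-groups/example-rule"
+```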
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------------------------------- | --------------------- | ----------------------------- | :---------: | ------------------------------------------------------------------------------ | +| [200](#get-rest-api-v1-resource-groups-resource-group-rule-name-200) | OK | List of resourceGroup objects | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-groups-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-groups-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-groups-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-groups-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-groups-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-groups-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - List of resourceGroup objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. (*GetRestAPIV1Search*) + +``` +GET /rest-api/v1/search +``` + +This endpoint returns an array of Kubernetes runtime Object matched using the query from context. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| -------- | ------- | ------ | -------- | --------- | :------: | ------- | ------------------------------------------------------ | +| page | `query` | string | `string` | | | | The current page to fetch. Default to 1 | +| pageSize | `query` | string | `string` | | | | The size of the page. Default to 10 | +| pattern | `query` | string | `string` | | ✓ | | The search pattern. Can be either sql or dsl. Required | +| query | `query` | string | `string` | | ✓ | | The query to use for search. 
Required | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------- | --------------------- | ----------------------- | :---------: | -------------------------------------------- | +| [200](#get-rest-api-v1-search-200) | OK | Array of runtime.Object | | [schema](#get-rest-api-v1-search-200-schema) | +| [400](#get-rest-api-v1-search-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-search-400-schema) | +| [401](#get-rest-api-v1-search-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-search-401-schema) | +| [404](#get-rest-api-v1-search-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-search-404-schema) | +| [405](#get-rest-api-v1-search-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-search-405-schema) | +| [429](#get-rest-api-v1-search-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-search-429-schema) | +| [500](#get-rest-api-v1-search-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-search-500-schema) | + +#### Responses + +##### 200 - Array of runtime.Object + +Status: OK + +###### Schema + +[][interface{}](#interface) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a cluster resource. (*PostRestAPIV1ClusterClusterName*) + +``` +POST /rest-api/v1/cluster/{clusterName} +``` + +This endpoint creates a new cluster resource using the payload. 
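+
+A sketch of a create request with a JSON payload (fields follow the cluster.ClusterPayload model below; the kubeconfig value is a placeholder):
+
+```shell
+# Register a cluster named "example-cluster" with a display name, description and kubeconfig.
+curl -X POST "$KARPOR_SERVER/rest-api/v1/cluster/example-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{"displayName": "Example Cluster", "description": "Demo cluster", "kubeconfig": "<kubeconfig contents>"}'
+```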
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#post-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#post-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#post-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#post-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#post-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#post-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Upload kubeConfig file for cluster (*PostRestAPIV1ClusterConfigFile*) + +``` +POST /rest-api/v1/cluster/config/file +``` + +Uploads a KubeConfig file for cluster, with a maximum size of 2MB. 
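+
+A sketch of a multipart upload using the form fields documented below (the kubeconfig path is illustrative):
+
+```shell
+# Upload a kubeconfig file and register it under the name "example-cluster".
+curl -X POST "$KARPOR_SERVER/rest-api/v1/cluster/config/file" \
+  -F "name=example-cluster" \
+  -F "displayName=Example Cluster" \
+  -F "description=Demo cluster" \
+  -F "file=@$HOME/.kube/config"
+```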
+ +#### Consumes + +* multipart/form-data + +#### Produces + +* text/plain + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ---------- | ------ | --------------- | --------- | :------: | ------- | ---------------------------------- | +| description | `formData` | string | `string` | | ✓ | | cluster description | +| displayName | `formData` | string | `string` | | ✓ | | cluster display name | +| file | `formData` | file | `io.ReadCloser` | | ✓ | | Upload file with field name 'file' | +| name | `formData` | string | `string` | | ✓ | | cluster name | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-file-200) | OK | Returns the content of the uploaded KubeConfig file. | | [schema](#post-rest-api-v1-cluster-config-file-200-schema) | +| [400](#post-rest-api-v1-cluster-config-file-400) | Bad Request | The uploaded file is too large or the request is invalid. | | [schema](#post-rest-api-v1-cluster-config-file-400-schema) | +| [500](#post-rest-api-v1-cluster-config-file-500) | Internal Server Error | Internal server error. | | [schema](#post-rest-api-v1-cluster-config-file-500-schema) | + +#### Responses + +##### 200 - Returns the content of the uploaded KubeConfig file. + +Status: OK + +###### Schema + +[ClusterUploadData](#cluster-upload-data) + +##### 400 - The uploaded file is too large or the request is invalid. + +Status: Bad Request + +###### Schema + +##### 500 - Internal server error. + +Status: Internal Server Error + +###### Schema + +### Validate KubeConfig (*PostRestAPIV1ClusterConfigValidate*) + +``` +POST /rest-api/v1/cluster/config/validate +``` + +Validates the provided KubeConfig using cluster manager methods. 
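+
+For example (the kubeConfig value is a placeholder for the raw kubeconfig content):
+
+```shell
+# Ask the server to validate a kubeconfig before registering the cluster.
+curl -X POST "$KARPOR_SERVER/rest-api/v1/cluster/config/validate" \
+  -H "Content-Type: application/json" \
+  -d '{"kubeConfig": "<kubeconfig contents>"}'
+```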
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | --------------------------------------------------- | ------------------------------- | --------- | :------: | ------- | ------------------------------ | +| request | `body` | [ClusterValidatePayload](#cluster-validate-payload) | `models.ClusterValidatePayload` | | ✓ | | KubeConfig payload to validate | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------------------------- | --------------------- | ---------------------------------- | :---------: | -------------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-validate-200) | OK | Verification passed server version | | [schema](#post-rest-api-v1-cluster-config-validate-200-schema) | +| [400](#post-rest-api-v1-cluster-config-validate-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-config-validate-400-schema) | +| [401](#post-rest-api-v1-cluster-config-validate-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-config-validate-401-schema) | +| [404](#post-rest-api-v1-cluster-config-validate-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-config-validate-404-schema) | +| [429](#post-rest-api-v1-cluster-config-validate-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-config-validate-429-schema) | +| [500](#post-rest-api-v1-cluster-config-validate-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-config-validate-500-schema) | + +#### Responses + +##### 200 - Verification passed server version + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a ResourceGroupRule. (*PostRestAPIV1ResourceGroupRule*) + +``` +POST /rest-api/v1/resource-group-rule +``` + +This endpoint creates a new ResourceGroupRule using the payload. 
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-resource-group-rule-200-schema) | +| [400](#post-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-resource-group-rule-400-schema) | +| [401](#post-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-resource-group-rule-401-schema) | +| [404](#post-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-resource-group-rule-404-schema) | +| [405](#post-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-resource-group-rule-405-schema) | +| [429](#post-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-resource-group-rule-429-schema) | +| [500](#post-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the cluster metadata by name. (*PutRestAPIV1ClusterClusterName*) + +``` +PUT /rest-api/v1/cluster/{clusterName} +``` + +This endpoint updates the display name and description of an existing cluster resource. 
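+
+For example, updating only the metadata of an existing cluster:
+
+```shell
+# Change the display name and description of "example-cluster".
+curl -X PUT "$KARPOR_SERVER/rest-api/v1/cluster/example-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{"displayName": "Example Cluster (renamed)", "description": "Updated description"}'
+```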
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#put-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#put-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#put-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#put-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#put-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#put-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#put-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the ResourceGroupRule metadata by name. (*PutRestAPIV1ResourceGroupRule*) + +``` +PUT /rest-api/v1/resource-group-rule +``` + +This endpoint updates the display name and description of an existing ResourceGroupRule. 
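+
+A sketch, assuming the request body is kept in a local file that follows the resourcegrouprule.ResourceGroupRulePayload model:
+
+```shell
+# Update an existing ResourceGroupRule; resource-group-rule.json is a hypothetical payload file.
+curl -X PUT "$KARPOR_SERVER/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d @resource-group-rule.json
+```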
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------------- | +| [200](#put-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-resource-group-rule-200-schema) | +| [400](#put-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-resource-group-rule-400-schema) | +| [401](#put-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-resource-group-rule-401-schema) | +| [404](#put-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-resource-group-rule-404-schema) | +| [405](#put-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-resource-group-rule-405-schema) | +| [429](#put-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-resource-group-rule-429-schema) | +| [500](#put-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +## Models + +### cluster.ClusterPayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------ | -------- | :------: | ------- | --------------------------------------------------------------- | ------- | +| description | string | `string` | | | ClusterDescription is the description of cluster to be created | | +| displayName | string | `string` | | | ClusterDisplayName is the display name of cluster to be created | | +| kubeconfig | string | `string` | | | ClusterKubeConfig is the kubeconfig of cluster to be created | | + +### cluster.UploadData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------------- | ------- | -------- | :------: | ------- | ----------- | ------- | +| content | string | `string` | | | | | +| fileName | string | `string` | | | | | +| fileSize | integer | `int64` | | | | | +| sanitizedClusterContent | 
string | `string` | | | | | + +### cluster.ValidatePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------- | ------ | -------- | :------: | ------- | ----------- | ------- | +| kubeConfig | string | `string` | | | | | + +### entity.ResourceGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------------- | ------------------- | :------: | ------- | ----------- | ------- | +| annotations | map of string | `map[string]string` | | | | | +| apiVersion | string | `string` | | | | | +| cluster | string | `string` | | | | | +| kind | string | `string` | | | | | +| labels | map of string | `map[string]string` | | | | | +| name | string | `string` | | | | | +| namespace | string | `string` | | | | | + +### insight.ResourceSummary + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| creationTimestamp | string | `string` | | | | | +| resource | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | +| resourceVersion | string | `string` | | | | | +| uid | string | `string` | | | | | + +### insight.ResourceTopology + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| children | []string | `[]string` | | | | | +| parents | []string | `[]string` | | | | | +| resourceGroup | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | + +### insight.ScoreData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------------------------------------------------------------------- | -------------- | ------------------ | :------: | ------- | ----------------------------------------------------------------------- | ------- | +| issuesTotal | integer | `int64` | | | IssuesTotal is the total count of all issues found during the audit. | | +| This count can be used to understand the overall number of problems | | | | | | | +| that need to be addressed. | | | | | | | +| resourceTotal | integer | `int64` | | | ResourceTotal is the count of unique resources audited during the scan. | | +| score | number | `float64` | | | Score represents the calculated score of the audited manifest based on | | +| the number and severity of issues. It provides a quantitative measure | | | | | | | +| of the security posture of the resources in the manifest. | | | | | | | +| severityStatistic | map of integer | `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective | | +| number of occurrences. It allows for a quick overview of the distribution | | | | | | | +| of issues across different severity categories. 
| | | | | | | + +### insight.Statistics + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------- | ------- | :------: | ------- | ----------- | ------- | +| clusterCount | integer | `int64` | | | | | +| resourceCount | integer | `int64` | | | | | +| resourceGroupRuleCount | integer | `int64` | | | | | + +### resourcegrouprule.ResourceGroupRulePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | -------- | ---------- | :------: | ------- | ----------- | ------- | +| description | string | `string` | | | | | +| fields | []string | `[]string` | | | | | +| name | string | `string` | | | | | + +### scanner.AuditData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | ------------------------------------------- | ---------------------- | :------: | ------- | ----------- | ------- | +| bySeverity | map of integer | `map[string]int64` | | | | | +| issueGroups | [][ScannerIssueGroup](#scanner-issue-group) | `[]*ScannerIssueGroup` | | | | | +| issueTotal | integer | `int64` | | | | | +| resourceTotal | integer | `int64` | | | | | + +### scanner.Issue + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------- | ------- | -------- | :------: | ------- | ------------------------------------------------------------------------------------- | ------- | +| message | string | `string` | | | Message provides a detailed human-readable description of the issue. | | +| scanner | string | `string` | | | Scanner is the name of the scanner that discovered the issue. | | +| severity | integer | `int64` | | | Severity indicates how critical the issue is, using the IssueSeverityLevel constants. | | +| title | string | `string` | | | Title is a brief summary of the issue. | | + +### scanner.IssueGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------------- | ----------------------------------------------- | ------------------------ | :------: | ------- | ----------- | ------- | +| issue | [ScannerIssue](#scanner-issue) | `ScannerIssue` | | | | | +| resourceGroups | [][EntityResourceGroup](#entity-resource-group) | `[]*EntityResourceGroup` | | | | | + +### unstructured.Unstructured + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------------------------- | ------------- | :------: | ------- | -------------------------------------------------------------------------------- | ------- | +| object | [interface{}](#interface) | `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or | | +| map[string]interface{} | | | | | | | +| children. | | | | | | | diff --git a/karpor_versioned_docs/version-v0.4/5-references/3-search-methods.md b/karpor_versioned_docs/version-v0.4/5-references/3-search-methods.md new file mode 100644 index 00000000..e1e63903 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/5-references/3-search-methods.md @@ -0,0 +1,109 @@ +--- +title: Search Methods +--- +Karpor is an open-source project that offers robust capabilities for searching resources across multiple clusters. This document outlines the two main search methods supported by Karpor: DSL (Domain Specific Language) and SQL (Structured Query Language), and explains how to utilize them for resource searches. 
+ +## Keywords + +Karpor facilitates resource searches using two methods: DSL and SQL. Both methodologies leverage the following keywords for resource discovery: + +- cluster +- apiVersion +- kind +- namespace +- name +- creationTimestamp +- deletionTimestamp +- ownerReferences +- resourceVersion +- labels.`key` +- annotations.`key` +- content + +## SQL + +Karpor offers a SQL-like approach for querying Kubernetes resources, enabling users to employ SQL syntax for their searches. Below are examples illustrating the use of SQL syntax for various search scenarios: + +**Query resources of the Namespace kind** + +```sql +select * from resources where kind='Namespace' +``` + +**Query resources where the labels contain the key 'key1' with value 'value1'** + +```sql +select * from resources where labels.key1='value1' +``` + +**Query resources where the annotations contain the key 'key1' with value 'value1'** + +```sql +select * from resources where annotations.key1='value1' +``` + +**Query resources that are not of the Pod kind** + +```sql +select * from resources where kind!='Pod' +``` + +**Query resources of the Pod kind within a specific cluster** + +```sql +select * from resources where cluster='demo' and kind='Pod' +``` + +**Query resources of kind within a specified list** + +```sql +select * from resources where kind in ('pod','service') +``` + +**Query resources of kinds not within a specified list** + +```sql +select * from resources where kind not in ('pod','service') +``` + +**Query resources where the namespace starts with appl (where % represents any number of characters)** + +```sql +select * from resources where namespace like 'appl%' +``` + +**Query resources where the namespace contains banan (where \_ represents any single character)** + +```sql +select * from resources where namespace like 'banan_' +``` + +**Query resources where the namespace does not start with appl** + +```sql +select * from resources where namespace not like 'appl%' +``` + +**Query resources where the namespace does not contain banan** + +```sql +select * from resources where namespace notlike 'banan_' +``` + +**Query resources of kind Deployment and created before January 1, 2024, at 18:00:00** + +```sql +select * from resources where kind='Deployment' and creationTimestamp < '2024-01-01T18:00:00Z' +``` + +**Query resources of kind Service and order by creation timestamp in descending order** + +```sql +select * from resources where kind='Service' order by creationTimestamp desc +``` + +**Query resources whose content contains apple** + +```sql +select * from resources where contains(content, 'apple') +``` diff --git a/karpor_versioned_docs/version-v0.4/5-references/_category_.json b/karpor_versioned_docs/version-v0.4/5-references/_category_.json new file mode 100644 index 00000000..1fd07096 --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/5-references/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "References" +} diff --git a/karpor_versioned_docs/version-v0.4/6-roadmap/README.md b/karpor_versioned_docs/version-v0.4/6-roadmap/README.md new file mode 100644 index 00000000..f5ffc68e --- /dev/null +++ b/karpor_versioned_docs/version-v0.4/6-roadmap/README.md @@ -0,0 +1,17 @@ +--- +title: Roadmap +--- +Karpor is an emerging open-source project, and we are committed to diligently polishing it into a **small and beautiful, vendor-neutral, developer-friendly, community-driven** open-source project! 
🚀 Moving forward, we will focus our efforts in the following areas: + +- Refine Karpor's **usability** to lower the barrier of entry and make it sufficiently "user-friendly." +- Strengthen Karpor's **reliability** to ensure it is dependable in production environments. +- Deepen the **ecosystem integration** with more community tools to ensure openness. +- Explore **AI + Karpor** to create more possibilities. +- Embrace the open-source community; we love the **open-source spirit**. If you're interested in open source, then start here! +- ...... + +Karpor follows the [Release Process and Cadence Guide](../4-developer-guide/2-conventions/1-release-process.md), but actions may not strictly adhere to the roadmap. We may adjust milestones based on feedback from community meetings and [GitHub issues](https://github.com/KusionStack/karpor/issues), expecting all community members to join the discussions. For final decisions, please refer to the [GitHub milestones](https://github.com/KusionStack/karpor/milestones). + +Below is the detailed roadmap, which we will continue to update ⬇️ + +- **2024 Roadmap**: [https://github.com/KusionStack/karpor/issues/273](https://github.com/KusionStack/karpor/issues/273) diff --git a/karpor_versioned_docs/version-v0.5/1-getting-started/1-overview.mdx b/karpor_versioned_docs/version-v0.5/1-getting-started/1-overview.mdx new file mode 100644 index 00000000..8c83972f --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/1-getting-started/1-overview.mdx @@ -0,0 +1,419 @@ +--- +id: overview +title: Overview +slug: / +--- + +import { + AiOutlineArrowRight, + AiFillCheckCircle, + AiFillCloseCircle, +} from "react-icons/ai"; +import logoImg from "@site/static/karpor/assets/logo/logo-full.png"; +import searchImg from "@site/static/karpor/assets/overview/search.png"; +import insightImg from "@site/static/karpor/assets/overview/insight.png"; +import visionImg from "@site/static/karpor/assets/overview/vision.png"; +import comingSoonImg from "@site/static/karpor/assets/misc/coming-soon.jpeg"; +import KarporButton from "@site/src/components/KarporButton"; +import GithubStar from "@site/src/components/GithubStars"; +import ReactPlayer from "react-player"; +import Typed from "typed.js"; +import clsx from "clsx"; + +export const Feature = ({ imgSrc, title, description, reverse }) => { + const reverseStyle = reverse ? { flexDirection: "row-reverse" } : {}; + return ( + <> +

{title}

+
+
+ +
+
+ {description} +
+
+ + ); +}; + +export const Content = () => { + const karporVsOthers = [ + { + label: "User Interface", + karpor: true, + kubernetesDashboard: true, + labelDesc: "", + }, + { + label: "Multi-Cluster", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Ability to connect to multiple clusters simultaneously", + }, + { + label: "Aggregated Resource View", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Human-friendly view for resources", + }, + { + label: "Security Compliance", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Automatic scanning risk, assessing health score", + }, + { + label: "Resource Relationship Topology", + karpor: true, + kubernetesDashboard: false, + labelDesc: "Insight into the context of resources", + }, + ]; + const h2Style = { + paddingBottom: "14px", + borderBottom: "2px solid #f1f1f1", + fontSize: 28, + }; + const flexDirectionStyle = { + display: "flex", + flexDirection: "column", + alignItems: "center", + }; + // Setup typed animation + const el = React.useRef(null); + React.useEffect(() => { + const typed = new Typed(el.current, { + strings: [ + "Locate resources, for Developers.", + "Explore cluster insights, for Admins.", + "Connect multi-clusters, for Platforms.", + ], + typeSpeed: 40, + backDelay: 1500, + loop: true, + }); + return () => { + // Destroy Typed instance during cleanup to stop animation + typed.destroy(); + }; + }, []); + return ( + <> +
+
+ +
+
+ + +
+
+ Intelligence for Kubernetes ✨ +
+
+ +
+
+
+
+
+

📖 What is Karpor?

+
+ Karpor is Intelligence for Kubernetes. It brings advanced{" "} + 🔍 Search, 💡 Insight and ✨ AI to Kubernetes. It is + essentially a Kubernetes Visualization Tool. With Karpor, you can gain crucial + visibility into your Kubernetes clusters across any clouds. +
+
+ We hope to become a{" "} + + small and beautiful, vendor-neutral, developer-friendly, + community-driven + {" "} + open-source project! 🚀 +
+
+
+
+ +
+
+
+
+

💡 Why Karpor?

+
+ + ⚡️ Automatic Syncing +
+ Automatically synchronize your resources across any clusters + managed by the multi-cloud platform. +
+
+ 🔍 Powerful, flexible queries +
+ Effectively retrieve and locate resources across multi clusters + that you are looking for in a quick and easy way. + + } + /> +
+ + 🔒 Compliance Governance +
+ Understand your compliance status across multiple clusters and + compliance standards. +
+
+ 📊 Resource Topology +
+ Logical and topological views of relevant resources within their + operational context. +
+
+ 📉 Cost Optimization +
+ Coming soon. + + } + /> +
+ + 💬 Natural Language Operations +
+ Interact with Kubernetes using plain language for more + intuitive operations. +
+
+ 📦 Contextual AI Responses +
+ Get smart, contextual assistance that understands your needs. +
+
+ 🤖 AIOps for Kubernetes +
+ Automate and optimize Kubernetes management with AI-powered + insights. + + } + /> +
+
+
+
+
+

🌈 Our Vision

+
+ The increasing complexity of the kubernetes ecosystem is an + undeniable trend that is becoming more and more difficult to + manage. This complexity not only entails a heavier burden on + operations and maintenance but also slows down the adoption of + new technologies by users, limiting their ability to fully + leverage the potential of kubernetes. +
+
+ We wish Karpor to focus on 🔍 search, 📊 insights, + and ✨ AI, to break through the increasingly complex maze of + kubernetes, achieving the following value proposition: +
+
+
+ +
+
+
+

🙌 Karpor vs. Kubernetes Dashboard

+
+ + {karporVsOthers?.map((item) => { + return ( +
+
+
{item?.label}
+ {item?.labelDesc && ( +
{item?.labelDesc}
+ )} +
+
+ {item?.karpor ? ( + + ) : ( + + )} +
+
+ {item?.kubernetesDashboard ? ( + + ) : ( + + )} +
+
+ ); + })} +
+

🎖️ Open Source Contributors

+
+

Thanks all! 🍻

+

+ Follow{" "} + Contributing Guide, + come and join us! 👇 +

+ +
+

👉 Next Step

+
+ +
+ + ); +}; + + diff --git a/karpor_versioned_docs/version-v0.5/1-getting-started/2-installation.md b/karpor_versioned_docs/version-v0.5/1-getting-started/2-installation.md new file mode 100644 index 00000000..055982b2 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/1-getting-started/2-installation.md @@ -0,0 +1,170 @@ +--- +title: Installation +--- + +## Install with helm + +If you have a kubernetes cluster, helm is the recommended installation method. + +The following tutorial will guide you to install Karpor using Helm, which will install the chart with the release name `karpor-release` in namespace `karpor`. + +### Prerequisites + +* Helm v3+ +* A Kubernetes Cluster (The simplest way is to deploy a kubernetes cluster locally using `kind` or `minikube`) + +### Remote Installation + +First, add the karpor chart repo to your local repository. + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +``` + +Then you can use the following command to install the latest version of Karpor. + +```shell +helm install karpor-release kusionstack/karpor +``` + +![Install](./assets/2-installation/install.gif) + +**Note** that installing this chart directly means it will use the [default template values](https://github.com/KusionStack/charts/blob/master/charts/karpor/values.yaml) for Karpor. + +You may have to set your specific configurations if it is deployed into a production cluster, or you want to customize the chart configuration, such as `resources`, `replicas`, `port` etc. + +All configurable parameters of the Karpor chart are detailed [here](#chart-parameters). + +```shell +helm install karpor-release kusionstack/karpor --set server.replicas=3 --set syncer.port=7654 +``` + +### Search all available versions + +You can use the following command to view all installable chart versions. + +```shell +helm repo update +helm search repo kusionstack/karpor --versions +``` + +### Upgrade specified version + +You can specify the version to be upgraded through the `--version`. + +```shell +# Upgrade to the latest version. +helm upgrade karpor-release kusionstack/karpor + +# Upgrade to the specified version. +helm upgrade karpor-release kusionstack/karpor --version 1.2.3 +``` + +### Local Installation + +If you have problem connecting to [https://kusionstack.github.io/charts/](https://kusionstack.github.io/charts/) in production, you may need to manually download the chart from [here](https://github.com/KusionStack/charts) and use it to install or upgrade locally. + +```shell +git clone https://github.com/KusionStack/charts.git +helm install karpor-release charts/karpor +helm upgrade karpor-release charts/karpor +``` + +### Uninstall + +To uninstall/delete the `karpor-release` helm release in namespace `karpor`: + +```shell +helm uninstall karpor-release +``` + +### Image Registry Proxy for China + +If you are in China and have problem to pull image from official DockerHub, you can use the registry proxy: + +```shell +helm install karpor-release kusionstack/karpor --set registryProxy=docker.m.daocloud.io +``` + +**NOTE**: The above is just an example, you can replace the value of `registryProxy` as needed. + +### Chart Parameters + +The following table lists the configurable parameters of the chart and their default values. + +#### General Parameters + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| namespace | string | `"karpor"` | Which namespace to be deployed. 
|
+| namespaceEnabled | bool | `true` | Whether to generate the namespace. |
+| registryProxy | string | `""` | Image registry proxy used as the prefix for all component images. |
+
+#### Global Parameters
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| global.image.imagePullPolicy | string | `"IfNotPresent"` | Image pull policy to be applied to all Karpor components. |
+
+#### Karpor Server
+
+The Karpor Server component is the main backend server. It is itself an `apiserver`, and it also provides `/rest-api` to serve the Dashboard.
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| server.image.repo | string | `"kusionstack/karpor"` | Repository for Karpor server image. |
+| server.image.tag | string | `""` | Tag for Karpor server image. Defaults to the chart's appVersion if not specified. |
+| server.name | string | `"karpor-server"` | Component name for Karpor server. |
+| server.port | int | `7443` | Port for Karpor server. |
+| server.replicas | int | `1` | The number of Karpor server pods to run. |
+| server.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Resource limits and requests for the Karpor server pods. |
+
+#### Karpor Syncer
+
+The Karpor Syncer component is an independent server that synchronizes cluster resources in real time.
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| syncer.image.repo | string | `"kusionstack/karpor"` | Repository for Karpor syncer image. |
+| syncer.image.tag | string | `""` | Tag for Karpor syncer image. Defaults to the chart's appVersion if not specified. |
+| syncer.name | string | `"karpor-syncer"` | Component name for Karpor syncer. |
+| syncer.port | int | `7443` | Port for Karpor syncer. |
+| syncer.replicas | int | `1` | The number of Karpor syncer pods to run. |
+| syncer.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Resource limits and requests for the Karpor syncer pods. |
+
+#### ElasticSearch
+
+The ElasticSearch component stores the synchronized resources and user data.
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| elasticsearch.image.repo | string | `"docker.elastic.co/elasticsearch/elasticsearch"` | Repository for ElasticSearch image. |
+| elasticsearch.image.tag | string | `"8.6.2"` | Specific tag for ElasticSearch image. |
+| elasticsearch.name | string | `"elasticsearch"` | Component name for ElasticSearch. |
+| elasticsearch.port | int | `9200` | Port for ElasticSearch. |
+| elasticsearch.replicas | int | `1` | The number of ElasticSearch pods to run. |
+| elasticsearch.resources | object | `{"limits":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"},"requests":{"cpu":"2","ephemeral-storage":"10Gi","memory":"4Gi"}}` | Resource limits and requests for the Karpor ElasticSearch pods. |
+
+#### ETCD
+
+The ETCD component is the storage backend of the Karpor Server (which acts as an `apiserver`).
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| etcd.image.repo | string | `"quay.io/coreos/etcd"` | Repository for ETCD image. |
+| etcd.image.tag | string | `"v3.5.11"` | Specific tag for ETCD image. |
+| etcd.name | string | `"etcd"` | Component name for ETCD. |
+| etcd.port | int | `2379` | Port for ETCD. |
+| etcd.replicas | int | `1` | The number of etcd pods to run. 
| +| etcd.resources | object | `{"limits":{"cpu":"500m","ephemeral-storage":"10Gi","memory":"1Gi"},"requests":{"cpu":"250m","ephemeral-storage":"2Gi","memory":"256Mi"}}` | Resource limits and requests for the karpor etcd pods. | + +#### Job + +This one-time job is used to generate root certificates and some preliminary work. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| job.image.repo | string | `"kusionstack/karpor"` | Repository for the Job image. | +| job.image.tag | string | `""` | Tag for Karpor image. Defaults to the chart's appVersion if not specified. | + diff --git a/karpor_versioned_docs/version-v0.5/1-getting-started/3-quick-start.md b/karpor_versioned_docs/version-v0.5/1-getting-started/3-quick-start.md new file mode 100644 index 00000000..592bb5c1 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/1-getting-started/3-quick-start.md @@ -0,0 +1,145 @@ +--- +title: Quick Start +--- +## Prerequisites + +* Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed. +* Ensure [helm](https://helm.sh/docs/intro/install/) is installed. +* If you do not have a ready-made cluster, you still need a [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation/). + +## Create Cluster (Optional) + +First, if you do not have a ready-made cluster, you need to create a kubernetes cluster in your local environment with the `kind` tool. Follow these steps: + +1. Create a cluster. You can create a cluster named `demo-cluster` using the following command: + ```shell + kind create cluster --name demo-cluster + ``` + + This will create a new Kubernetes cluster in your local Docker environment. Wait for a moment until the cluster creation is complete. +2. Verify that the cluster is running properly by executing the command: + ```shell + kubectl cluster-info + ``` + + If everything is set up correctly, you'll see information about your Kubernetes cluster. + +## Installation + +To install Karpor, execute the following command in your terminal: + +```shell +helm repo add kusionstack https://kusionstack.github.io/charts +helm repo update +helm install karpor kusionstack/karpor +``` + +For more installation details, please refer to the [Installation Documentation](2-installation.md). + +![Install](./assets/2-installation/install.gif) + +## Access Karpor Dashboard + +1. Run the following command to access the Karpor service running in the cluster: + ```shell + kubectl -n karpor port-forward service/karpor-server 7443:7443 + ``` + + After executing this command, if you access port 7443 on your local machine, the traffic will be forwarded to port 7443 of the karpor-server service in the Kubernetes cluster. +2. Open your browser and enter the following URL: + ```shell + https://127.0.0.1:7443 + ``` + +This will open the Karpor Web interface. 👇 + +![Open in Browser](./assets/2-installation/open-in-browser.gif) + +Congratulations! 🎉 You have successfully installed Karpor. Now you can start using Karpor to explore and gain insights into resources across multiple clusters. + +## Create Access Token + +Before registering clusters, you need to create an access token to log in to the Karpor Web interface. Here are the brief steps to create a token: + +1. Export the KubeConfig of the Hub Cluster: + +```shell +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +2. 
Create ServiceAccount and ClusterRoleBinding: + +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +``` + +3. Create token: + +```shell +kubectl create token karpor-admin --duration=1000h +``` + +Copy the generated token, which will be used later to log in to the Karpor Web interface. + +For detailed instructions on creating tokens, please refer to the [How to Create Token](../3-user-guide/1-how-to-create-token.md) documentation. + +## Register Cluster + +To register a new cluster with Karpor, follow these steps: + +1. Log in to the Karpor Web interface using the token created in the previous step. +2. Navigate to the `Cluster Management` section in the Karpor UI. +3. Click on the `Register Cluster` button. +4. Follow the on-screen instructions to complete the registration process. + +5. When registering a cluster, please note the following: + + - The cluster name must be unique and cannot be changed once created. + - Ensure that there is network connectivity between the server address (target cluster address) in the uploaded cluster certificate and Karpor. + - If you deployed Karpor in a local cluster and want to register that local cluster, you need to modify the server address in the cluster certificate to the internal cluster address `https://kubernetes.default.svc.cluster.local:443` to ensure that Karpor can directly access the target cluster. + - If you want to register an EKS cluster, additional configuration of the kubeconfig is required, including adding the `env`, `interactiveMode`, and `provideClusterInfo` fields. For detailed steps, please refer to the "Registering an EKS Cluster" section in the [Multi-cluster Management](../3-user-guide/2-multi-cluster-management.md) documentation. + +6. After completing the above steps, click the `Validate and Submit` button. + +An example of the `Register Cluster` page can be found in the image below: + +![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png) + +For a more detailed explanation of the registration process, refer to the [Multi-cluster management](../3-user-guide/2-multi-cluster-management.md) Documentation. + +## Search Resources + +Karpor provides a powerful search feature that allows you to quickly find resources across the registered clusters. To use this feature: + +1. Go to the `Search` page within the Karpor UI. +2. Enter the search criteria for the resources you are looking for. + +Here is an example of the `Search` page: + +![](/karpor/assets/search/search-auto-complete.png) +![](/karpor/assets/search/search-result.png) + +To learn more about the search capabilities and how to use them effectively, check out the [Search Methods Documentation](../5-references/3-search-methods.md). + +## Gain Insight into Resources + +By clicking on a result from your search, you can delve into the `Insight` page, where you'll be able to investigate risks related to the resource, see a topological view with its relevant resources, and examine its detailed information. 
+ +Here are examples for what you can find on the Insight page: + +![](/karpor/assets/insight/insight-home.png) +![](/karpor/assets/insight/insight-single-issue.png) +![](/karpor/assets/insight/insight-topology.png) + +## Conclusion + +Please note that this guide only provides a quick start for Karpor, and you may need to refer to additional documentations and resources to configure and use other features. + +If you have any questions or concerns, check out the official documentation of Karpor or seek relevant support. + +## Next Step + +- Learn Karpor's [Architecture](../concepts/architecture) and [Glossary](../concepts/glossary). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. diff --git a/karpor_versioned_docs/version-v0.5/1-getting-started/_category_.json b/karpor_versioned_docs/version-v0.5/1-getting-started/_category_.json new file mode 100644 index 00000000..41f4c00e --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/1-getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Getting Started" +} diff --git a/karpor_versioned_docs/version-v0.5/1-getting-started/assets/2-installation/install.gif b/karpor_versioned_docs/version-v0.5/1-getting-started/assets/2-installation/install.gif new file mode 100644 index 00000000..68889793 Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/1-getting-started/assets/2-installation/install.gif differ diff --git a/karpor_versioned_docs/version-v0.5/1-getting-started/assets/2-installation/open-in-browser.gif b/karpor_versioned_docs/version-v0.5/1-getting-started/assets/2-installation/open-in-browser.gif new file mode 100644 index 00000000..00adfb18 Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/1-getting-started/assets/2-installation/open-in-browser.gif differ diff --git a/karpor_versioned_docs/version-v0.5/2-concepts/1-architecture.md b/karpor_versioned_docs/version-v0.5/2-concepts/1-architecture.md new file mode 100644 index 00000000..c53e8491 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/2-concepts/1-architecture.md @@ -0,0 +1,24 @@ +--- +title: Architecture +--- +![](assets/1-architecture/architecture.png) + +## Components + +- `Dashboard`: Web UI for Karpor. +- `Server`: Main Backend Server for Karpor. +- `Syncer`: Independent Server to synchronize cluster resources in real-time. +- `Storage`: Storage Backend to store the synchronized resources and user data. + +## How Karpor Works + +1. After installation, users can register clusters of interest into Karpor. +2. The Syncer runs and automatically synchronizes the resources of interest from the cluster to Storage. It also ensures the real-time changes to the resources are automatically sync-ed to Karpor Storage. +3. When a user wishes to locate specific resource(s), a search query can be typed into the search box in the Dashboard. The Dashboard interacts with the search endpoint of the Server. The search module within the Server parses the search query, searches for relevant resources in Storage, and returns the results to the Dashboard. +4. Upon clicking a search result, the user is directed to a resource insight page. The Dashboard calls the insight endpoint of the Server, where the Server's insight module performs a static scan of the resource, generates issue reports, and locates its relevant resources to draw a resource topology map with all of its parents and children. +5. 
The insight page also applies to groups of resources, such as all resources in a cluster, a Group-Version-Kind combination, a namespace or a custom-defined resource group. + +## Next Step + +- Learn Karpor's [Glossary](../concepts/glossary). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. diff --git a/karpor_versioned_docs/version-v0.5/2-concepts/3-glossary.md b/karpor_versioned_docs/version-v0.5/2-concepts/3-glossary.md new file mode 100644 index 00000000..780e374b --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/2-concepts/3-glossary.md @@ -0,0 +1,57 @@ +--- +title: Glossary +--- +## Cluster + +Equivalent to the concept of a cluster in `Kubernetes`, such as a cluster named `democluster`. + +`Karpor` can manage multiple clusters, including cluster registration, certificate rotation, generating and viewing insights, and other operations through a Dashboard. It also supports accessing any managed cluster using a unified certificate issued by `Karpor` through command-line tools such as `kubectl` and `kubectx`. + +For more details, please refer to the best practice: [One Pass with Proxy](../3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md). + +## Hub Cluster + +Cluster that manages other clusters. Since Karpor itself is also a Kubernetes Apiserver, we have registered some custom resources in this special cluster to manage cluster metadata, resource recycling strategies, and so on. We refer to this special cluster as the Hub Cluster, distinguishing it from the hosted user clusters. + +## Managed Cluster + +It generally refers to the clusters managed by the Hub Cluster, which are typically the user clusters hosted in Karpor. + +## Resource + +Equivalent to the resource concept in `Kubernetes`, such as a `Deployment` named `mockDeployment`. + +`Karpor` performs real-time synchronization, search, and insights on resources within the managed clusters. A resource is the object with the smallest granularity for searching and insights in `Karpor`. + +## Resource Group + +**A resource group is a logical organizational structure** used to combine related `Kubernetes` resources for a more intuitive view, search, and insight experience. For example, an `Application` named `mockapp` resource group can be created to includes a `Namespace`, a `Deployment`, and multiple `Pods`, all with a specific label such as `app.kubernetes.io/name: mockapp`. + +## Resource Group Rule + +**A resource group rule is a set of conditions** that groups specific resources into appropriate resource groups. These rules aim to organize resources into logical units based on properties such as `annotations`, `labels`, `namespace`, and so on. For example, to define an Application resource group rule, you can specify the `app.kubernetes.io/name` annotation as a grouping condition. + +`Karpor` has a preset resource group rule - `Namespace` - as well as custom resource group rules. + +![](assets/3-glossary/image-20240326171327110.png) + +## Topology + +In `Karpor`, the topology refers to the **relations and dependencies between relevant resources within a given resource group**. Viewing and understanding the interior structure of a resource group is made easier with a visual topology diagram, which is helpful for troubleshooting and locating issues. + +## Audit + +Audit refers to **performing a compliance scan on all resources within a given resource group**. The goal is to help users discover potential risks. 
The scanning tools and rules used for the audit are currently built into the system, but we will support customization in the future. + +## Issue + +**The output of the audit is referred to as issues**. If there are no problems with the scanned object, the audit results will be empty. Otherwise, all identified risks will be categorized by their risk level and displayed, including descriptions of each risk, associated resources, etc., guiding users to fix the issues, ensure the security and compliance of the cluster resources. + +## Score + +The score is used to reflect the **overall health status of a resource group or a resource**, reminding users to take timely adjustments and measures. The health score is calculated based on the resource group's audit results. The factors that impact the score include: **risk level**, **number of risks**, and **total number of resources**. + +## Next Step + +- Learn Karpor's [Architecture](../concepts/architecture). +- View [User Guide](../user-guide/multi-cluster-management) to look on more of what you can achieve with Karpor. diff --git a/karpor_versioned_docs/version-v0.5/2-concepts/_category_.json b/karpor_versioned_docs/version-v0.5/2-concepts/_category_.json new file mode 100644 index 00000000..bccddbf1 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/2-concepts/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Concepts" +} diff --git a/karpor_versioned_docs/version-v0.5/2-concepts/assets/1-architecture/architecture.png b/karpor_versioned_docs/version-v0.5/2-concepts/assets/1-architecture/architecture.png new file mode 100644 index 00000000..afec9346 Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/2-concepts/assets/1-architecture/architecture.png differ diff --git a/karpor_versioned_docs/version-v0.5/2-concepts/assets/3-glossary/image-20240326171327110.png b/karpor_versioned_docs/version-v0.5/2-concepts/assets/3-glossary/image-20240326171327110.png new file mode 100644 index 00000000..f5673eb8 Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/2-concepts/assets/3-glossary/image-20240326171327110.png differ diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/1-how-to-create-token.md b/karpor_versioned_docs/version-v0.5/3-user-guide/1-how-to-create-token.md new file mode 100644 index 00000000..3e2ef484 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/1-how-to-create-token.md @@ -0,0 +1,83 @@ +--- +title: How to Create Token +--- +In this document, you will learn how to use a token to access the Karpor dashboard. + +[Hub Cluster](../2-concepts/3-glossary.md#hub-cluster) adopts the same Role-Based Access Control (RBAC) mechanism as Kubernetes. This means that in order to access the Hub Cluster, users need to create a ClusterRole, ServiceAccount, and the corresponding ClusterRoleBinding in the Hub Cluster to bind the two. To enhance user experience, we have preset two ClusterRoles: karpor-admin and karpor-guest. The karpor-admin role has permissions to perform all actions on the dashboard, including but not limited to adding or deleting clusters, creating resource groups, etc., while the karpor-guest role is limited to view-only actions on the dashboard. As users gain a deeper understanding of Karpor, they can create additional ClusterRoles based on their needs to achieve more granular permission management. 
+ +## Exporting the KubeConfig for the Hub Cluster + +Since the Hub Cluster requires a KubeConfig for authentication, you can export the KubeConfig to access the Hub Cluster using the following command. +```shell +# The following operation is performed in the Kubernetes cluster where Karpor is installed +kubectl get configmap karpor-kubeconfig -n karpor -o go-template='{{.data.config}}' > $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +**Note**: Please ensure that the server address in the Hub Cluster's KubeConfig is accessible from your local machine. The default address is the internal cluster address (https://karpor-server.karpor.svc:7443), which cannot be directly connected from local. If you deployed Karpor in a local cluster, you need to forward the karpor-server service to local port 7443 and change the server address to `https://127.0.0.1:7443`. + +You can use the following sed command to change the access address in the Hub Cluster certificate to the local address: + +For MacOS/BSD systems (need an extra `''` after `-i`): +```shell +sed -i '' 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +For Linux/GNU systems (only `-i`): +```shell +sed -i 's/karpor-server.karpor.svc/127.0.0.1/g' $HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +For Windows systems: +Please modify the server address manually in the kubeconfig file. + +## Forward the Services of the Hub Cluster to the Local Machine + +In this section, we assume that you have deployed Karpor in a local cluster. + +As mentioned in the previous section, to access the Hub Cluster locally, you need to forward the karpor-server service to your local machine. If you have used other methods for forwarding, you can skip this step. Here, we will use a simple port-forwarding method. Open another terminal and run: + +```shell +# The following operation is performed in the Kubernetes cluster where Karpor is installed +kubectl -n karpor port-forward svc/karpor-server 7443:7443 +``` + +## Create ServiceAccount and ClusterRoleBinding for Your Users + +This section will guide you on how to create karpor-admin and karpor-guest users in the Hub Cluster and assign the corresponding ClusterRoleBinding to them. Here are the specific steps: + +First, specify the target cluster for kubectl to connect to as the Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +Then, we will create two common identities: administrator (karpor-admin) and guest (karpor-guest). This process includes creating ServiceAccounts and binding them to the corresponding ClusterRoles: + +```shell +kubectl create serviceaccount karpor-admin +kubectl create clusterrolebinding karpor-admin --clusterrole=karpor-admin --serviceaccount=default:karpor-admin +kubectl create serviceaccount karpor-guest +kubectl create clusterrolebinding karpor-guest --clusterrole=karpor-guest --serviceaccount=default:karpor-guest +``` + +## Create Tokens for Your Users + +The following operations need to be performed in the Hub Cluster. Please ensure that kubectl is correctly set to connect to the Hub Cluster: +```shell +export KUBECONFIG=$HOME/.kube/karpor-hub-cluster.kubeconfig +``` + +By default, the validity period of a token is 1 hour. If you need a long-term token, you can specify the expiration time when generating the token. For example: + +```shell +kubectl create token karpor-admin --duration=1000h +``` + +By default, the maximum validity period of the token is 8760 hours (1 year). 
If you need to modify this maximum validity period, you can add `--service-account-max-token-expiration={MAX_EXPIRATION:h/m/s}` to the startup parameters of the karpor-server.
+
+**Note**: Creating a token requires kubectl version 1.25.0 or higher.
+
+## Start Using Karpor Safely
+
+Copy the token you just generated and paste it into the token input box on the Karpor dashboard, then click login.
+
+Start your Karpor journey in a secure environment!
diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/2-multi-cluster-management.md b/karpor_versioned_docs/version-v0.5/3-user-guide/2-multi-cluster-management.md
new file mode 100644
index 00000000..9352b7a0
--- /dev/null
+++ b/karpor_versioned_docs/version-v0.5/3-user-guide/2-multi-cluster-management.md
@@ -0,0 +1,93 @@
+---
+title: Multi-Cluster Management
+---
+Multi-cluster management is the entry point for registering clusters into Karpor, enabling search and insight capabilities across a large number of clusters.
+
+## Register Cluster
+
+1. Click the Cluster Management Tab.
+2. Click the Register Cluster button.
+   ![](/karpor/assets/cluster-mng/cluster-mng-empty.png)
+3. Add the cluster name. The cluster name must be unique and CANNOT be altered once created.
+4. Upload the cluster's KubeConfig file. One with read permission is sufficient.
+5. Click the Verify and Submit button.
+   ![](/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png)
+6. Once verified, the cluster will be added under the Cluster Management page.
+   ![](/karpor/assets/cluster-mng/cluster-mng-register-success.png)
+
+**Note**: Please ensure network connectivity between the server address (target cluster address) in the uploaded cluster certificate and Karpor. For example, if you have deployed Karpor in a local cluster and want to register that local cluster, you need to modify the server address in the cluster certificate to the internal cluster address `https://kubernetes.default.svc.cluster.local:443` to ensure that Karpor can directly access the target cluster.
+
+### Register EKS Cluster
+
+If you want to register an EKS cluster, you need to perform some additional operations on the KubeConfig:
+
+1. Export the KubeConfig for the EKS cluster. For example, you can obtain the KubeConfig for the specified cluster using the following AWS command:
+
+```shell
+aws eks --region <region-code> update-kubeconfig --name <cluster-name> --kubeconfig=<kubeconfig-file-path>
+```
+
+2. Add the fields `env`, `interactiveMode`, and `provideClusterInfo` to the `users/exec` section of the exported KubeConfig file. You can refer to the following KubeConfig structure:
+
+```yaml
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: CA
+    server: SERVER
+  name: CLUSTER
+contexts:
+- context:
+    cluster: CLUSTER
+    user: USER
+  name: CONTEXT
+current-context: CONTEXT
+kind: Config
+preferences: {}
+users:
+- name: USER
+  user:
+    exec:
+      apiVersion: client.authentication.k8s.io/v1beta1
+      args:
+      - --region
+      - ap-southeast-1
+      - eks
+      - get-token
+      - --cluster-name
+      - mycluster3
+      - --output
+      - json
+      command: aws
+      ### The following fields need to be added to the KubeConfig.
+      env:
+      - name: AWS_ACCESS_KEY_ID
+        value:
+      - name: AWS_SECRET_ACCESS_KEY
+        value:
+      - name: AWS_DEFAULT_REGION
+        value:
+      - name: AWS_DEFAULT_OUTPUT
+        value: json
+      interactiveMode: IfAvailable
+      provideClusterInfo: false
+```
+
+3. 
+ +## Edit Cluster + +The Edit button allows for modifications to the Display Name and Description, thus altering how the cluster's name and description appear on the Dashboard. +![](/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png) + +## Rotate Certificate + +When the kubeconfig expires, you can update the certificate by clicking Rotate Certificate. +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png) +![](/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png) + +## Remove Cluster + +The delete button facilitates the removal of a registered cluster. +![](/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png) diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/3-search.md b/karpor_versioned_docs/version-v0.5/3-user-guide/3-search.md new file mode 100644 index 00000000..f72693de --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/3-search.md @@ -0,0 +1,34 @@ +--- +title: How to Search +--- +Within this section, we will explore how to perform multi-cluster resource searches using Karpor, with this guide being done entirely through the Dashboard. + +We support three methods of search: + +- **Search by SQL**: Perform resource searches using SQL query language. +- **Search by DSL**: Conduct resource searches through `Karpor`'s Domain Specific Language (DSL). +- **Search by Natural Language**: Using natural language for resource search. + +## Search by SQL + +Karpor offers a nifty SQL query feature that allows you to search and filter all Kubernetes resources within managed clusters using familiar SQL syntax and provides targeted optimizations and enhancements for multi-cluster resource searches. + +SQL is one of the easily accessible skills for practitioners in the software engineering industry, theoretically making the learning curve quite low. As such, this search method is prepared for you! It is particularly well-suited for beginners to Karpor. + +Below are the steps to use Search by SQL: + +1. **Enter the Search page**: We designed the homepage as the entry point for search, so opening `Karpor`'s Web UI immediately presents you with the search page. + ![](/karpor/assets/search/search-home.png) +2. **Compose SQL query statements**: Write your query statement using SQL syntax, specifying the cluster name, resource type, conditions, and filters you wish to search for. Additionally, if you enter a keyword and press a space, the search box will pop up with a dropdown with auto-completion, suggesting possible keywords you can type next. + ![](/karpor/assets/search/search-auto-complete.png) +3. **Execute the query**: Click the 'search' button to execute the query and be sent to the search results page. Karpor will return a list of resources that match the SQL query. + ![](/karpor/assets/search/search-result.png) +4. **Advanced features**: Utilize our built-in advanced SQL syntax, such as sorting, full-text search, etc., to refine your search further. For details, please refer to: [Search Methodology Documentation](../5-references/3-search-methods.md). + +## Search by DSL + +Coming soon. 🚧 + +## Search by Natural Language + +Coming soon. 
🚧 diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md new file mode 100644 index 00000000..6632f8e9 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/1-inspecting-any-resource-group-and-resource.md @@ -0,0 +1,27 @@ +--- +title: Inspecting Any Resource Group and Resource +--- +In this part, we will explain in detail through clear steps and examples how to use Karpor to inspect any resource group or resource. + +If you are not familiar with relevant concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +## Inspecting Specific Resources + +1. Search for the resource you are interested in: + ![](/karpor/assets/search/search-home.png) +2. On the search results page, all resources filtered by the criteria will be listed: + ![](/karpor/assets/search/search-result.png) +3. Click on any resource name to jump to that resource's insight page: + ![](/karpor/assets/insight/insight-home.png) + +## Inspecting Specific Resource Groups + +You may notice that in each search result entry, tags for `Cluster`, `Kind`, `Namespace`, etc., of the resource are listed. Please note that these tags are **hyperlinks**, which we refer to as "**anchor points**". These represent the links to a particular resource group or a resource. By clicking on these **anchor points**, you can quickly jump to the insight page of that resource group or resource. + +![](/karpor/assets/search/search-result.png) + +## Flexible Switching Between Resource Groups/Resources + +In fact, besides the tags in the mentioned search results, any resource/resource group names you see on any page can be re-directed to as **anchor points**, which serve like space-time wormholes, allowing you to traverse back and forth through any dimension until you find the resources you are searching for. Both search and anchor points are means to expedite the retrieval, which are key features of Karpor as a Kubernetes Explorer. + +![](/karpor/assets/insight/insight-breadcrumbs.png) diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/2-custom-resource-group.md b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/2-custom-resource-group.md new file mode 100644 index 00000000..2f22fb79 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/2-custom-resource-group.md @@ -0,0 +1,92 @@ +--- +title: Custom Resource Group +--- +## Creating Custom Resource Group + +This section will focus on how to create custom resource group within Karpor. Through custom resource group, you can flexibly manage and organize resources in Karpor according to your own needs and logical concepts. We will guide you step by step to create and define custom resource group and show you how to use these groups for resource insight and management. + +If you're not familiar with **Resource Group** and **Resource Group Rule** related concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +**Let's assume** that within your organization or company, there is a concept of `application unit` that represent **all resources of an application in a certain environment**. + +We mark the **name and environment of the application in the label**. 
For example, the following is the `application unit` of `mock-apple` in the `production environment`: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: mock-apple + name: mock-apple +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + template: + metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + fruit: apple + spec: + containers: + - image: nginx:latest + name: mock-container + dnsPolicy: ClusterFirst + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + name: mock-service-apple-prod + namespace: mock-apple +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app.kubernetes.io/environment: prod + app.kubernetes.io/name: mock-apple + type: ClusterIP +``` + +Now, we will create a custom `resource group rule` called the `application unit` by following the steps below. It will classify all resources in the cluster according to the rules specified by the user and list all `resource groups` that comply with the rules. + +1. Click on the Insight tab to enter the insight homepage. +2. At the bottom of the page, you will see a default resource group rule `namespace`, which is a single rule classified by a namespace. + ![](/karpor/assets/insight/insight-homepage.png) +3. Click on the create button + of the resource group and fill in the **basic information and classification rules** of the `application unit` in the pop-up window. + ![](/karpor/assets/insight/insight-create-app-resource-group-rule.png) +4. Click on the Submit button, then click on the newly appearing application unit tab to list all application units. + ![](/karpor/assets/insight/insight-list-app-resource-groups.png) +5. You can enter keywords in the search box to quickly find the `application unit` of `mock-apple` in `production`. + ![](/karpor/assets/insight/insight-search-app-resource-group.png) +6. You can click the View button on a resource group card to jump to the corresponding `resource group insight page` and view aggregated information such as all resources, topology relationships, compliance reports, etc. of a certain application unit. +7. If necessary, you can also use the same steps to create an `environment resource group`. + ![](/karpor/assets/insight/insight-create-env-resource-group-rule.png) + ![](/karpor/assets/insight/insight-list-env-resource-groups.png) + +## Edit Custom Resource Group + +You can click the button on the right side of the custom resource group tab to modify basic information and classification rules in the pop-up window. + +![](/karpor/assets/insight/insight-edit-env-resource-group.png) + +## Delete Custom Resource Group + +You can click the button on the right side of the custom resource group tab, then click on the Delete to delete current resource group rule in the pop-up window. 
+ +![](/karpor/assets/insight/insight-delete-env-resource-group.png) diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/3-summary.md b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/3-summary.md new file mode 100644 index 00000000..1f3970f7 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/3-summary.md @@ -0,0 +1,22 @@ +--- +title: Summary +--- +In this section, we will learn about the `summary card` on the Karpor insight page, which are used to quickly view and understand key metrics for the current resource group or resource. + +Under different resource groups, the content displayed by the `summary card` may also vary. + +If you are on: + +1. **Resource Group Insight Page**: + 1. **Cluster Insight Page**, the summary card shows the **Node, Pod numbers, CPU, memory capacity, and Kubernetes version of the cluster**. + ![](/karpor/assets/insight/insight-summary-cluster.png) + 2. **Resource Kind Insight Page**, the summary card shows the **affiliated cluster, GVK (Group Version Kind) information, and the number of that type of resource under the current cluster**. + ![](/karpor/assets/insight/insight-summary-kind.png) + 3. **Namespace Insight Page**, the summary card shows the **affiliated cluster, namespace, and the most abundant resource types under the current namespace.** + ![](/karpor/assets/insight/insight-summary-namespace.png) + 4. **Custom Resource Group Insight Page**, the summary card shows the **key-value of each rule, and several resource statistics under the current resource group.** + ![](/karpor/assets/insight/insight-summary-custom-resource-group.png) +2. **Resource Insight Page**, the summary card shows the **current resource's name, GVK information, affiliated cluster, and namespace.** + ![](/karpor/assets/insight/insight-summary-resource.png) + +⚠️ **Attention**: No matter which resource group insight page you are on, the summary card will always display a health score, calculated based on the risk compliance status of the subject. diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/4-compliance-report.md b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/4-compliance-report.md new file mode 100644 index 00000000..1c714804 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/4-compliance-report.md @@ -0,0 +1,16 @@ +--- +title: Compliance Report +--- +This section will introduce the compliance scan feature, primarily used to detect and assess whether all resources in the current resource or resource group comply with specific compliance standards and security policies. In this section, you will understand how to effectively utilize the compliance scan feature to ensure the security and compliance of the cluster and resources. + +If you're not familiar with **Compliance Report** or **Risk** related concepts, you can refer to the [Glossary](../../2-concepts/3-glossary.md) section. + +1. Follow the guidance on [Inspecting Any Resource Group and Resource](#inspecting-any-resource-group-and-resource) and resource to navigate to the insights page of a particular resource group/resource. +2. You can see the **Compliance Report** card of the resource. + ![](/karpor/assets/insight/insight-home.png) +3. This card displays the **Risk** identified during the scan of the current resource or all the resources under the resource group, categorized by risk level. Under each risk level tag, risks are sorted from highest to lowest occurrence. 
Each risk entry shows the title, description, number of occurrences, and the scanning tool that discovered the issue. +4. Clicking on a specific risk will display a popup with the details of the risk. + ![](/karpor/assets/insight/insight-single-issue.png) +5. Click on View All Risks, and a drawer will pop out listing all the risks. Here, you can search, categorize, paginate, etc + ![](/karpor/assets/insight/insight-all-issues.png) +6. Once you have resolved a risk following its indications, you can click the [Rescan] button, which will trigger a comprehensive compliance scan of all resources under the resource group. The Dashboard will display the new results once the scan is completed. diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/5-topology.md b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/5-topology.md new file mode 100644 index 00000000..73a4ef79 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/5-topology.md @@ -0,0 +1,19 @@ +--- +title: Topology +--- +## Topology + +In this section, we will explore the topology feature in Karpor. The topology view will help you more intuitively understand the relationships and dependencies among various resources in your cluster. Here's how to use the topology view. + +1. Follow the guidance on [Inspecting Any Resource Group and Resource](#inspecting-any-resource-group-and-resource) to navigate to the insights page of a particular resource group/resource. +2. At the bottom of the page, you can see the resource topology map. + ![](/karpor/assets/insight/insight-topology.png) +3. Depending on the current page: + 1. Resource Insights Page: + 1. The map will display relevant upstream and downstream resources related to the current resource. For example, if the current resource is a Deployment, the topology map will show the ReplicaSet under the Deployment and the Pods under the ReplicaSet. + ![](/karpor/assets/insight/insight-topology-example.png) + 2. Clicking on a node in the resource topology map is equivalent to clicking on an anchor of a specific resource, which will directly navigate to the insights page of that resource. + 2. Resource Group Insights Page: + 1. The map will intuitively show the quantity and relationship of all types of resources under the current resource group. + 2. Clicking on a node in the resource topology map is equivalent to clicking on a resource type, and the list below will refresh with all the resources under a specific type within the current resource group. + ![](/karpor/assets/insight/insight-linkage.png) diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/_category_.json b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/_category_.json new file mode 100644 index 00000000..c39e5397 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "How to Insight" +} diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/index.md b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/index.md new file mode 100644 index 00000000..0bdb110d --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/4-insight/index.md @@ -0,0 +1,6 @@ +--- +title: How to Insight +--- +In this section, we will introduce how to gain comprehensive insights into the resources within a cluster using Karpor. 
You can access the Insight page in various ways and easily toggle between insight pages for different scopes (such as Cluster, Kind, Namespace or individual Resource). If there are domain-specific logical scopes within your current organization, you can even customize resource groups (such as Application, Environment, etc.) by setting resource group rules. We also provide functionality to gain insights into these custom resource groups. + +This guide will be entirely operated from the Karpor Dashboard. diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md new file mode 100644 index 00000000..cb8cb78f --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/1-one-pass-with-proxy.md @@ -0,0 +1,42 @@ +--- +title: One-Pass with Proxy +--- +## Challenges and Demands + +### The Scale of Multicluster at a Grand Scale + +In June 2014, Kubernetes, born from Google's internal Borg project, made a striking debut. Endorsed by tech giants and aided by a thriving open-source community, it gradually became the de facto standard in the container orchestration field. As companies began to deploy Kubernetes in production environments, a single Kubernetes cluster could no longer meet the increasingly complex demands internally. It's common for the number of nodes in a single cluster to exceed the community-recommended limit (5,000), making the expansion into a multicluster configuration a natural choice. + +### The Basic Needs from Multicluster Accessors + +With the thriving development of multiclusters, various platforms may need to access resources across different clusters, requiring access to each cluster's KubeConfig. + +As the number of users and clusters increases, cluster administrators face significant time costs: If there are `M` clusters and `N` users, the time complexity for managing KubeConfig becomes `O(M*N)`. Moreover, users need to switch between different KubeConfigs when accessing different clusters, and the corresponding permissions for KubeConfigs vary across clusters, undoubtedly adding to the complexity of use. + +![Direct Connection: Users need to maintain multiple KubeConfigs](assets/1-one-pass-with-proxy/image-20240326163622363.png) + +Under these circumstances, is there a method to conveniently access resources in different clusters without maintaining a large number of KubeConfigs and managing various users' permissions across clusters? Moreover, this method should ideally be cloud-native, accessible through kubectl and Kubernetes' official client, to reduce the cost of transitioning to this method. The emergence of `Karpor` is to solve these problems. + +## The Idea of A "One-Pass Access" + +We developed `Karpor`, an open-source project. While serving as a Kubernetes Explorer with unique advantages in searching and insight into cluster resources, its foundational multicluster management component, featuring cluster certificate issuance and multicluster request proxying, makes it highly suitable as a unified access point for platforms to multiple clusters. This component supports forwarding user requests to designated clusters in a cloud-native manner, allowing users to maintain a single set of KubeConfigs to access different clusters, making multicluster access as simple as accessing a single cluster. So, how does it work? 
Below, we introduce `Karpor`'s architecture and functionality. + +![Using Multi-cluster Gateway: Users only need to maintain a single set of KubeConfigs](assets/1-one-pass-with-proxy/image-20240326164141400.png) + +### Multi-cluster Request Routing and Proxy + +`Karpor` includes an application layer gateway capable of forwarding any Kubernetes-style request to a specified Kubernetes cluster. `Karpor` is also developed based on the Kubernetes framework as a kube-apiserver, which can operate independently or as an extension to an existing kube-apiserver. `Karpor` supports handling two types of extended resources: `Cluster` and `Cluster/Proxy`, the former for storing cluster information and the latter for forwarding user requests to a specific cluster. Users can access through the Kubernetes official CLI (`kubectl`) or SDK (`client-go`, `client-java`, etc.). + +`Karpor` proxies all access to `Cluster/Proxy` subresources to the target cluster. For example, to retrieve Pod information from the `Cluster1` cluster, users need to send the `GET /apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods` request to `Karpor`. `Karpor` will generate a KubeConfig from the `Cluster/Cluster1` resource for accessing the cluster and proxy the `/api/v1/pods` request to the `Cluster1` cluster. + +![Accessing any managed cluster with kubectl & karpor certificate](assets/1-one-pass-with-proxy/image-20240326165247891.png) + +### Supporting All Kubernetes Native Requests + +`Karpor` supports forwarding all kube-apiserver requests. Specifically, `Karpor` is an application layer gateway that proxies HTTP requests through the HTTP connect protocol. In addition to supporting `get`, `create`, `update`, and `delete` operations on resources, it also supports `watch`, `log`, `exec`, `attach`, etc. (Since the SPDY protocol used for `exec`, and `attach` does not support http2, `Karpor` will disable http2 when forwarding these requests, switching to http1.1 and supporting hijacker processing). + +![](assets/1-one-pass-with-proxy/image-20240326165632158.png) + +## Summary + +As can be gleaned from the text above, utilizing `Karpor`'s multi-cluster management component enables the issuance of a "multi-cluster pass" with controllable permissions for users. Users no longer need to concern themselves with issues such as frequent cluster certificate switching and onboarding of new clusters. With this "one-pass access", the cost of accessing multiple clusters is reduced, fulfilling the most fundamental needs of most users on multi-cluster platforms. 
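+
+As a minimal sketch of the access pattern described above, assuming you already hold a Karpor-issued kubeconfig (the file path below is a placeholder), listing Pods in a managed cluster could look like the following. The proxy path reuses the illustrative example quoted earlier in this document; the exact API group and version may differ in your deployment.
+
+```shell
+# List Pods in the managed cluster "cluster1" through Karpor's multi-cluster proxy.
+# The proxy path mirrors the example earlier in this document; adjust it to the
+# API actually exposed by your Karpor installation.
+kubectl --kubeconfig ~/.kube/karpor.kubeconfig \
+  get --raw "/apis/kusionstack.io/Cluster/cluster1/proxy/api/v1/pods"
+```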
diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/_category_.json b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/_category_.json new file mode 100644 index 00000000..82dd90e3 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Best Production Practices" +} diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png new file mode 100644 index 00000000..ab8051fe Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326163622363.png differ diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png new file mode 100644 index 00000000..de950079 Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326164141400.png differ diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png new file mode 100644 index 00000000..27fffb47 Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165247891.png differ diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png new file mode 100644 index 00000000..99053c68 Binary files /dev/null and b/karpor_versioned_docs/version-v0.5/3-user-guide/5-best-production-practices/assets/1-one-pass-with-proxy/image-20240326165632158.png differ diff --git a/karpor_versioned_docs/version-v0.5/3-user-guide/_category_.json b/karpor_versioned_docs/version-v0.5/3-user-guide/_category_.json new file mode 100644 index 00000000..8f01ba26 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/3-user-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "User Guide" +} diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/1-non-code-contribute.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/1-non-code-contribute.md new file mode 100644 index 00000000..721dbf92 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/1-non-code-contribute.md @@ -0,0 +1,40 @@ +--- +title: Non-code Contribution Guide +--- +You can contribute in any of the following ways that interest you. + +## Contributing Use Cases and Demos + +* If you are using Karpor, the simplest way to contribute is to [express gratitude to the community](https://github.com/KusionStack/karpor/issues/343). + +## Reporting Bugs + +Before submitting a new issue, please make sure that no one has already reported the problem. 
+ +Check the [Issue list](https://github.com/KusionStack/karpor/issues) for any similar issues. + +[Report bugs](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug-report.yaml) by submitting a Bug report, ensuring you provide as much information as possible to help reproduce the Bug. + +Follow the issue template and add additional information to help us replicate the issue. + +## Security Issues + +If you believe you have discovered a security vulnerability, please read our [security policy](https://github.com/KusionStack/karpor/blob/main/SECURITY.md) for more detailed information. + +## Suggesting Enhancements + +If you have ideas to improve Karpor, please submit a [feature request](https://github.com/KusionStack/karpor/issues/new?assignees=&labels=kind%2Ffeature&projects=&template=enhancement.yaml). + +## Answering Questions + +If you have a question and cannot find the answer in the [documentation](https://www.kusionstack.io/karpor/), the next step is to ask on [GitHub Discussions](https://github.com/KusionStack/karpor/discussions). + +Helping these users is important to us, and we would love to have your help. You can contribute by answering [their questions](https://github.com/KusionStack/karpor/discussions) to help other Karpor users. + +## Contributing Documentation + +Contributing to the documentation requires some knowledge on how to submit a pull request to Github, which I think won't be difficult if you follow the guide. + +* [kusionstack.io Developer's Guide](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +For more ways to contribute, please look at the [Open Source Guide](https://opensource.guide/how-to-contribute/). diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/2-code-contribute.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/2-code-contribute.md new file mode 100644 index 00000000..e00a4374 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/2-code-contribute.md @@ -0,0 +1,174 @@ +--- +title: Code Contribution Guide +--- +In this code contribution guide, you will learn about the following: + +- [How to run Karpor locally](#running-karpor-locally) +- [How to create a pull request](#creating-a-pull-request) +- [Code review guidelines](#code-review) +- [Formatting guidelines for pull requests](#formatting-guidelines) +- [Updating Documentation and Website](#updating-documentation-and-website) + +## Running Karpor Locally + +This guide will help you get started with Karpor development. + +### Prerequisites + +* Golang version 1.19+ + +
+ Installing Golang + +1. Install go1.19+ from the [official website](https://go.dev/dl/). Extract the binary files and place them at a location, assuming it is located under the home directory `~/go/`, here is an example command, you should choose the correct binary file for your system. + +``` +wget https://go.dev/dl/go1.20.2.linux-amd64.tar.gz +tar xzf go1.20.2.linux-amd64.tar.gz +``` + +If you would like to maintain multiple versions of golang in your local development environment, you can download the package and extract it to a location, like `~/go/go1.19.1`, and then alter the path in the command below accordingly. + +1. Set environment variables for Golang + +``` +export PATH=~/go/bin/:$PATH +export GOROOT=~/go/ +export GOPATH=~/gopath/ +``` + +If the `gopath` folder does not exist, create it with `mkdir ~/gopath`. These commands will add the go binary folder to the `PATH` environment variable (making it the primary choice for go) and set the `GOROOT` environment to this go folder. Please add these lines to your `~/.bashrc` or `~/.zshrc` file, so you won't need to set these environment variables every time you open a new terminal. + +1. (Optional) Some regions, such as China, may have slow connection to the default go registry; you can configure GOPROXY to speed up the download process. + +``` +go env -w GOPROXY=https://goproxy.cn,direct +``` + +
+ +* Kubernetes version v1.20+ configured with `~/.kube/config`. +* golangci-lint version v1.52.2+, it will be installed automatically if you run `make lint`, if the installation fails, you can install it manually. + +
+ Manually installing golangci-lint + +You can install it manually following the [guide](https://golangci-lint.run/welcome/install), or use the command: + +``` +cd ~/go/ && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.52.2 +``` + +
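+
+If you would like to sanity-check your environment before building, the following commands (assuming `kubectl` is already installed and configured) should succeed and report versions that satisfy the prerequisites above:
+
+```shell
+# Quick sanity check of the prerequisites listed above.
+go version                # expect go1.19 or newer
+kubectl version --client  # the configured cluster should be v1.20+
+golangci-lint version     # expect v1.52.2 or newer; `make lint` installs it if missing
+```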
+ +### Building + +- Clone this project + +```shell +git clone git@github.com:KusionStack/karpor.git +``` + +- Build locally + +Executing `make build-all` will build the executables for all platforms; if you only want to build for a specific platform, execute `make build-${PlatformName}`, e.g., `make build-darwin`. To see all available commands, execute `make help`. + +### Testing + +It's essential to write tests to maintain code quality, you can run all unit tests by executing the following command in the project root directory: + +```shell +make test +``` + +If you need to generate extra coverage report files, execute: + +```shell +make cover +``` + +Then you can view the content of the coverage report in a browser by running: + +```shell +make cover-html +``` + +## Creating a Pull Request + +We are thrilled that you are considering contributing to the Karpor project! + +This document will guide you through the process of [creating a pull request](./index.md#contribute-a-pull-request). + +### Before you begin + +We know you are excited to create your first pull request. Before we get started, make sure your code follows the relevant [code conventions](../2-conventions/2-code-conventions.md). + +### Your First Pull Request + +Before submitting your PR, run the following commands to ensure they all succeed: + +``` +make test +make lint +``` + +If this is your first time contributing to an open-source project on GitHub, please make sure to read the instructions on [creating a pull request](https://help.github.com/en/articles/creating-a-pull-request). + +To increase the chances of your pull request being accepted, please ensure your pull request follows these guidelines: + +- The title and description match the implementation. +- The commits in the pull request follow the [formatting guidelines](#Formatting-guidelines). +- The pull request closes a related issue. +- The pull request includes necessary tests to verify the expected behavior. +- If your pull request has conflicts, please rebase your branch onto the main branch. + +If the pull request fixes a bug: + +- The pull request description must contain `Closes #` or `Fixes #`. +- To prevent regressions, the pull request should include tests that replicate the bug being fixed. + +## Code Review + +Once you have created a pull request, the next step is to have others review your changes. Review is a learning opportunity for both reviewers and the author of the pull request. + +If you believe a specific person should review your pull request, you can tag them in the description or a comment. +Tag a user by typing an `@` symbol followed by their username. + +We recommend that you read [How to do a code review](https://google.github.io/eng-practices/review/reviewer/) to learn more about code reviews. + +## Formatting Guidelines + +A well-crafted pull request can minimize the time to get your changes accepted. These guidelines will help you write well-formulated commit messages and descriptions for your pull requests. + +### Commit Message Format + +More see: [Commit Conventions](../2-conventions/4-commit-conventions.md) + +### Pull Request Title + +When accepting pull requests, the Karpor team merges all commits into one. + +The pull request title becomes the subject line of the merged commit message. + +We still encourage contributors to write informative commit messages, as they will be part of the Git commit body. + +We use the pull request titles when generating change logs for releases. 
Hence, we strive to make the titles as informative as possible. + +Make sure your pull request title uses the same format as the commit message subject line. If the format is not followed, we will add a `title-needs-formatting` label on the pull request. + +### Passing All CI Checks + +Before merging, all testing CIs should pass: + +- Coverage should not drop. Currently, the pull request coverage should be at least 70%. +- Karpor uses a **CLA** for the contributor agreement. It requires you to sign for every commit before merging the pull request. + +## Updating Documentation and Website + +If your pull request has been merged, and it is a new feature or enhancement, you need to update the documentation and send a pull request to the [kusionstack.io](https://github.com/KusionStack/kusionstack.io) repository. + +Learn how to write documentation through the following guide: + +- [kusionstack.io Developer Guide](https://github.com/KusionStack/kusionstack.io/blob/main/README.md) + +Awesome, you've completed the lifecycle of code contribution! diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/3-roles.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/3-roles.md new file mode 100644 index 00000000..e9193204 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/3-roles.md @@ -0,0 +1,41 @@ +--- +title: Roles +--- +Thank you for your interest and support for karpor! + +This document outlines the roles and responsibilities of contributors in the project, as well as the process for becoming a Contributor and losing Maintainer status. We hope that this document will help every contributor understand the growth path and make a greater contribution to the project's development. + +## Contributor Roles and Responsibilities + +we have two main contributor roles: Contributor and Maintainer. + +Here is a brief introduction to these two roles: + +1. Contributor: A contributor to the project who can contribute code, documentation, testing, and other resources. Contributors provide valuable resources to the project, helping it to continuously improve and develop. +2. Maintainer: A maintainer of the project who is responsible for the day-to-day maintenance of the project, including reviewing and merging PRs, handling issues, and releasing versions. Maintainers are key members of the project and have a significant impact on the project's development direction and decision-making. + +## How to become a Maintainer + +We welcome every contributor to contribute to the project's development and encourage contributors to upgrade to the role of Maintainer. + +The following are the conditions for upgrading from Contributor to Maintainer: + +1. Continuous contribution: Contributors need to contribute to the project continuously for a period of time (e.g., 3 months). This demonstrates the contributor's attention and enthusiasm for the project. +2. Quality assurance: The code or documentation submitted by contributors needs to maintain a high level of quality, meet the project's specifications, and have a positive impact on the project. +3. Active participation: Contributors need to actively participate in project discussions and decision-making, providing constructive opinions and suggestions for the project's development. +4. Team collaboration: Contributors need to have good teamwork skills, communicate friendly with other contributors and maintainers, and work together to solve problems. +5. 
Responsibility: Contributors need to have a certain sense of responsibility and be willing to undertake some of the project maintenance work, including reviewing PRs and handling issues. When a contributor meets the above conditions, existing maintainers will evaluate them. + +If they meet the requirements of Maintainer, they will be invited to become a new Maintainer. + +## Losing Maintainers status + +Maintainer have important responsibilities in the project, and we hope that every Maintainer can maintain their attention and enthusiasm for the project. + +However, we also understand that everyone's time and energy are limited, so when Maintainers cannot continue to fulfill their responsibilities, they will be downgraded to the role of Contributor: + +1. Long-term inactivity: If a Maintainer has not participated in project maintenance work, including reviewing PRs and handling issues, for a period of time (e.g., 3 months), they will be considered inactive. +2. Quality issues: If a Maintainer's work in the project has serious quality issues that affect the project's development, they will be considered not meeting the requirements of Maintainer. +3. Team collaboration issues: If a Maintainer has serious communication or teamwork issues with other contributors and maintainers, such as disrespecting others' opinions, frequent conflicts, or refusing to collaborate, which affects the project's normal operation and atmosphere, they will be considered not meeting the requirements of Maintainer. +4. Violation of rules: If a Maintainer violates the project's rules or code of conduct, including but not limited to leaking sensitive information or abusing privileges, they will be considered not meeting the requirements of Maintainer. +5. Voluntary application: If a Maintainer cannot continue to fulfill their responsibilities due to personal reasons, they can voluntarily apply to be downgraded to the role of Contributor. diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/_category_.json b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/_category_.json new file mode 100644 index 00000000..09eab23b --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "Contribution Guide" +} diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/index.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/index.md new file mode 100644 index 00000000..a23e2100 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/1-contribution-guide/index.md @@ -0,0 +1,118 @@ +# Contributing Guide + +Contributing Guide that introduces how to participate and contribute to the community. + +To help us create a safe and positive community experience for all, we require all participants adhere to the CNCF Community [Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). + +## Before contributing + +### Find a Contribution Point + +You can contribute to Karpor in several ways including code and non-code contributions, +we appreciate every effort you contribute to the community. + +Here are some examples: + +* Contribute to the codebase and docs. +* Report and triage issues. +* Organize meetups and user groups in your local area. +* Help others by answering questions about Karpor. 
+ +And: + +- If you don’t know what issues start, we have prepared a [Community tasks | 新手任务清单 🎖︎](https://github.com/KusionStack/karpor/issues/463), or you can filter [help wanted](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) or [good first issue](https://github.com/KusionStack/karpor/issues?q=is%3Aopen+is%3Aissue++label%3A%22good+first+issue%22) label in issue tracker. you can choose the issue you like. +- If you have any questions, please [Submit the Issue](https://github.com/KusionStack/karpor/issues/new/choose) or [Post on the discussions](https://github.com/KusionStack/karpor/discussions/new/choose), we will answer as soon as possible. + +### How to Contribute Non-code + +We regard non-coding contribution as equally important with code contribution for the community's very existence and its future growth. + +- Refer to [Non-code Contribution Guide](./non-code-contribute) to know how you could help. + +### How to Contribute Code + +Unsure where to begin contributing to Karpor codebase? Start by browsing issues labeled `good first issue` or `help wanted`. + +- [Good first issue](https://github.com/KusionStack/karpor/labels/good%20first%20issue) issues are generally straightforward to complete. +- [Help wanted](https://github.com/KusionStack/karpor/labels/help%20wanted) issues are problems we would like the community to help us with regardless of complexity. +- Refer to [Code Contribution Guide](./code-contribute) for more details. + +Learn [Code Conventions](../conventions/code-conventions) and [Test Conventions](../conventions/test-conventions) and understand what to pay attention to when writing code. + +And learn the [Release Process And Cadence](../conventions/release-process) to know when your code changes will be released. + +## Contribute a Pull Request + +After opening or claiming an issue, you could contribute codes or non-codes to karpor by a pull request. Here are the steps you should follow: + +### Fork Repository + +Karpor adopts trunk-based development, i.e., the code used for release is maintained on the main branch. + +Thus, to develop karpor, you have to fork one project in [karpor](https://github.com/KusionStack/karpor) repository to your workspace, and then check out a new branch to develop coding. + +### Develop Code/Non-Code + +Now you can start writing to solve the issue. To maintain the quality of karpor, after submitting the PR, some necessary checks will be triggered. + +After the development is completed, commit and push to your forked repository. Since the PR Title will be used as a merging commit message, we ask your PR Title to meet the [Commit Conventions](../2-conventions/4-commit-conventions.md). + +Here are some simple explanations: + +PR Title should be organized following this structure: + +``` +[optional scope]: + +[optional body] +``` + +The required type helps better capture the area of the commit, based on the [Angular guidelines](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines). + +We use lowercase for `` to avoid spending time on case-sensitive issues. 
`` can be one of the following: + +``` +feat: A new feature +fix: A bug fix +docs: Documentation only changes +build: Changes that affect the build system or external dependencies +style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) +refactor: A code change that neither fixes a bug nor adds a feature +perf: A code change that improves performance +test: Adding missing tests or correcting existing tests +chore: Changes to the build process or auxiliary tools and libraries such as documentation generation +``` + +### Open a Pull Request + +[Open a pull request](https://github.com/KusionStack/karpor/pulls) from the develop branch of your forked repository to the main branch of karpor. You should clearly describe what you do in the PR, and link it to an issue. Besides, the PR title should also follow the commit conventions described above, and must be 5-256 characters in length, prefix `WIP` and `[WIP]` are not allowed. + +### Sign CLA + +If it was your first pull request, you need to sign our [CLA(Contributor License Agreement)](https://github.com/KusionStack/.github/blob/main/CLA.md). The only thing you need to do is to post a pull request comment same as the below format: + +`I have read the CLA Document and I hereby sign the CLA` + +If your CLA signature failed, you may find the solutions below: + +* The comment must be in the same format as above, with no extra spaces, line breaks, etc. +* The git committer must be the same one who created the Karpor PR + +### PR Checks + +To keep the reliability of the karpor project, the following check will get triggered automatically: + +* Unit Test +* Golang Lint +* Commit Lint +* PR Title Lint +* License Lint +* Markdown Link Lint + +Please make sure your PR passes these checks. + +## Become a Community Member + +If you're interested to become a community member or learn more about the governance, please check the [Roles](./3-roles.md) for details. + +Enjoy coding and collaboration in Karpor world! diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/1-release-process.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/1-release-process.md new file mode 100644 index 00000000..6dda486a --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/1-release-process.md @@ -0,0 +1,49 @@ +--- +title: Release Process And Cadence +--- +## Release Planning + +We will establish and continuously follow up on the release plan through [GitHub Milestones](https://github.com/KusionStack/karpor/milestones). Each release milestone will include two types of tasks: + +- Tasks Maintainers commit to complete. Maintainers will decide on the features they are committed to implementing before the next release based on their available time and effort. Usually, tasks are finalized after offline discussions and then added to the milestone. These tasks will be assigned to the Maintainer who plans to implement or test them. +- Additional items contributed by community contributors, typically non-urgent features or optimizations. Maintainers do not commit to completing these issues within the release cycle but will commit to reviewing submissions from the community. + +The milestones will clearly describe the most important features and their expected completion dates. This will clearly inform end-users about the timing and contents of the next release. + +In addition to the next milestone, we will also maintain drafts of future release milestones. 
+ +## Release Standards + +- All **official releases** should be tagged on the `main` branch, with optional pre-release version suffixes such as: `alpha`, `beta`, `rc`, for example, a regular official release version might be `v1.2.3`, `v1.2.3-alpha.0`. For instance, if we want to perform some validations before releasing the official version `v1.2.3`, we could first release a pre-release version like `v1.2.3-alpha.0`, followed by `v1.2.3` after the validation is complete. +- Maintainers commit to completing certain features and enhancements, tracking progress through [GitHub Milestones](https://github.com/KusionStack/karpor/milestones). +- We will do our best to avoid release delays; thus, if we cannot complete a feature on time, it will be moved to the next release. +- A new version will be released every **1 month**. + +## Release Standard Procedure + +Maintainers are responsible for driving the release process and following standard operating procedures to ensure the quality of the release. + +1. Tag the git commit designated for release and push it upstream; the tag needs to comply with [Semantic Versioning](#semantic-versioning). +2. Ensure that the triggered Github Actions pipeline is executed successfully. Once successful, it will automatically generate a new Github Release, which includes the Changelog calculated from commit messages, as well as artifacts such as images and tar.gz files. +3. Write clear release notes based on the **Github Release**, including: + - User-friendly release highlights. + - Deprecated and incompatible changes. + - Brief instructions on how to install and upgrade. + +## Gate Testing + +Before creating the release branch, we will have a **1-week** code freeze period. During this period, we will refrain from merging any feature PRs and will only fix bugs. + +Maintainers will test and fix these last-minute issues before each release. + +## Semantic Versioning + +`Karpor` adopts [Semantic Versioning](https://semver.org/) for its version numbers. + +The version format: `MAJOR.MINOR.PATCH`, for example, `v1.2.3`. The version number **incrementing rules** are as follows: + +- MAJOR version when you make incompatible API changes. +- MINOR version when you add functionality in a backwards-compatible manner. +- PATCH version when you make backwards-compatible bug fixes. + +**Pre-release version numbers and build metadata** can be added to the `MAJOR.MINOR.PATCH` as an extension, like `v1.2.3-alpha.0`, `v1.2.3-beta.1`, `v1.2.3-rc.2`, where `-alpha.0`, `-beta.1`, `-rc.2` are pre-release versions. diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/2-code-conventions.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/2-code-conventions.md new file mode 100644 index 00000000..0fa03be0 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/2-code-conventions.md @@ -0,0 +1,94 @@ +--- +title: Code Conventions +--- +In this section, you will find the code conventions for all kinds of code Karpor project related. It's not necessary to learn all of them at once, but make sure you have read corresponding parts before you start to code. 
+ +- [Go Code Conventions](#go-code-conventions) +- [Bash or Script Conventions](#bash-or-script-conventions) +- [Directory and File Conventions](#directory-and-file-conventions) +- [Logging Conventions](#logging-conventions) + +## Go Code Conventions + +- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +- [Effective Go](https://go.dev/doc/effective_go) +- Know and avoid [Go landmines](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) +- Comment your code. + + - [Go's commenting conventions](https://go.dev/blog/godoc) + - If reviewers ask questions about why the code is the way it is, that's a + sign that comments might be helpful. +- Command-line flags should use dashes, not underscores +- API + + - According to RFC3986, URLs are "case sensitive". Karpor uses `kebab-case` for API URLs. + - e.g.: `POST /rest-api/v1/resource-group-rule` +- Naming + + - Please consider package name when selecting an interface name, and avoid + redundancy. + + - e.g.: `storage.Interface` is better than `storage.StorageInterface`. + - Do not use uppercase characters, underscores, or dashes in package + names. + - Please consider parent directory name when choosing a package name. + + - so pkg/manager/cluster/foo.go should say `package cluster` + not `package clustermanager`. + - Unless there's a good reason, the `package foo` line should match + the name of the directory in which the .go file exists. + - Importers can use a different name if they need to disambiguate. + - Locks should be called `lock` and should never be embedded (always `lock sync.Mutex`). When multiple locks are present, give each lock a distinct name + following Go conventions - `stateLock`, `mapLock` etc. + +## Bash or Script Conventions + +- [https://google.github.io/styleguide/shell.xml](https://google.github.io/styleguide/shell.xml) +- Ensure that build, release, test, and cluster-management scripts run on + macOS + +## Directory and File Conventions + +- Avoid package sprawl. Find an appropriate subdirectory for new packages. + + - Libraries with no more appropriate home belong in new package + subdirectories of pkg/util +- Avoid general utility packages. Packages called "util" are suspect. Instead, + derive a name that describes your desired function. For example, the utility + functions dealing with waiting for operations are in the "wait" package and + include functionality like Poll. So the full name is wait.Poll +- All filenames should be lowercase +- Go source files and directories use underscores, not dashes + + - Package directories should generally avoid using separators as much as + possible (when packages are multiple words, they usually should be in nested + subdirectories). +- Document directories and filenames should use dashes rather than underscores +- Contrived examples that illustrate system features belong in + `/docs/user-guide` or `/docs/admin`, depending on whether it is a feature primarily + intended for users that deploy applications or cluster administrators, + respectively. Actual application examples belong in /examples. 
+ + - Examples should also illustrate [best practices for configuration and using the system](https://kubernetes.io/docs/concepts/configuration/overview/) +- Third-party code + + - Go code for normal third-party dependencies is managed using + [go modules](https://github.com/golang/go/wiki/Modules) + - Other third-party code belongs in `/third_party` + + - forked third party Go code goes in `/third_party/forked` + - forked _golang stdlib_ code goes in `/third_party/forked/golang` + - Third-party code must include licenses + - This includes modified third-party code and excerpts, as well + +## Linting and Formatting + +To ensure consistency across the Go codebase, we require all code to pass a number of linter checks. + +To run all linters, use the `lint` Makefile target: + +```shell +make lint +``` + +The command will clean code along with some lint checks. Please remember to check in all changes after that. diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/3-test-conventions.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/3-test-conventions.md new file mode 100644 index 00000000..9eb0e2e4 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/3-test-conventions.md @@ -0,0 +1,267 @@ +--- +title: Test Conventions +--- +## Testing Principles + +In Karpor, we primarily focus on the following three types of tests: + +- Unit tests: Tests targeting the **smallest testable units** (such as functions, methods, utility classes, etc.) +- Integration tests: Tests targeting the interaction and integration between **multiple units (or modules)** +- End-to-End tests (e2e tests): Tests targeting the **entire system's behavior**, usually requiring the simulation of real user scenarios + +Each has its strengths, weaknesses, and suitable scenarios. To achieve a better development experience, we should adhere to the following principles when writing tests. + +**Testing principles**: + +- A case should only cover one scenario +- Follow the **7-2-1 principle**, i.e., 70% unit tests, 20% integration tests, and 10% end-to-end tests +- **Avoid using Mock frameworks in unit tests unless necessary** (e.g., `golang/mock`). If you feel the need to use a Mock framework in unit tests, what you actually need might be integration tests or even end-to-end tests + +## Technology Selection + +At the current point in time, the most popular testing frameworks in the Go language ecosystem are [Ginkgo](https://onsi.github.io/ginkgo/)/[Gomega](https://onsi.github.io/gomega/) and [Testify](https://github.com/stretchr/testify). Therefore, this section mainly discusses the characteristics, pros and cons, and the final selection of these two testing frameworks. + +### Ginkgo/Gomega + +**Advantages**: + +1. **BDD Support**: Ginkgo is favored by many developers for its support of Behavior-Driven Development (BDD) style. It offers a rich DSL syntax, making test cases more descriptive and readable through keywords like `Describe`, `Context`, `It`, etc. +2. **Parallel Execution**: Ginkgo can execute tests in parallel across different processes, improving the efficiency of test execution. +3. **Rich Matchers**: Used in conjunction with the Gomega matchers library, it provides a wealth of assertion capabilities, making tests more intuitive and convenient. +4. **Asynchronous Support**: Ginkgo provides native support for handling complex asynchronous scenarios, reducing the risk of deadlocks and timeouts. +5. 
**Test Case Organization**: Supports organizing test cases into Suites for easy management and expansion. +6. **Documentation**: Ginkgo's [official documentation](http://onsi.github.io/ginkgo/) is very detailed, offering guides from beginner to advanced usage. + +**Disadvantages**: + +1. **Learning Curve:** For developers not familiar with BDD, Ginkgo's DSL syntax may take some time to get used to. +2. **Complexity in Parallel Testing:** Although Ginkgo supports parallel execution, managing resources and environment for parallel tests can introduce additional complexity in some cases. + +### Testify + +**Advantages**: + +1. **Simplified API**: Testify provides a simple and intuitive API, easy to get started with, especially for developers accustomed to the `testing` package. +2. **Mock Support**: Testify offers powerful Mock functionalities, facilitating the simulation of dependencies and interfaces for unit testing. +3. **Table-Driven Tests**: Supports table-driven testing, allowing for easy provision of various inputs and expected outputs for the same test function, enhancing test case reusability. +4. **Compatibility with `testing` Package**: Testify is highly compatible with the Go standard library's `testing` package, allowing for seamless integration into existing testing workflows. +5. Documentation: Testify's [official documentation](https://pkg.go.dev/github.com/stretchr/testify) also provides rich introductions on how to use its assertion and mocking functionalities. + +**Disadvantages**: + +1. **Lack of BDD Support**: Testify does not support the BDD style, potentially less intuitive for developers looking to improve test case readability. +2. **Relatively Simple Features**: Compared to Ginkgo, Testify's features are relatively simple and may not meet some complex testing scenarios' requirements. + +### Summary + +In short, Ginkgo/Gomega offers better readability and maintainability, producing clean and clear tests, but with a higher learning curve requiring familiarity with BDD style. Testify is simpler, more practical, with a lower learning curve, but as time progresses, the testing code style may become more varied, lowering maintainability. + +Considering the actual situation of Karpor and the pros and cons of both frameworks, we decide to use these two frameworks in combination: + +- Use Testify for unit testing, adhering to [Table-Driven Testing](https://go.dev/wiki/TableDrivenTests) to constrain the code style and prevent decay; +- Utilize Ginkgo's BDD features for writing higher-level integration and end-to-end tests; + +This combination fully leverages the strengths of both frameworks, improving the overall efficiency, readability, and quality of testing. + +## Writing Specifications + +### Test Style + +[Table-Driven Testing](https://go.dev/wiki/TableDrivenTests) is a best practice for writing test cases, akin to design patterns in programming, and it is also the style recommended by the official Go language. Table-Driven Testing uses tables to provide a variety of inputs and expected outputs, allowing the same test function to verify different scenarios. The advantages of this method are that it increases the reusability of test cases, reduces repetitive code, and makes tests clearer and easier to maintain. + +While there is no direct syntax support for Table-Driven Testing in Go's `testing` package, it can be emulated by writing helper functions and using anonymous functions. 
+ +Here is an example of Table-Driven Testing implemented in the Go standard library's `fmt` package: + +```go +var flagtests = []struct { + in string + out string +}{ + {"%a", "[%a]"}, + {"%-a", "[%-a]"}, + {"%+a", "[%+a]"}, + {"%#a", "[%#a]"}, + {"% a", "[% a]"}, + {"%0a", "[%0a]"}, + {"%1.2a", "[%1.2a]"}, + {"%-1.2a", "[%-1.2a]"}, + {"%+1.2a", "[%+1.2a]"}, + {"%-+1.2a", "[%+-1.2a]"}, + {"%-+1.2abc", "[%+-1.2a]bc"}, + {"%-1.2abc", "[%-1.2a]bc"}, +} +func TestFlagParser(t *testing.T) { + var flagprinter flagPrinter + for _, tt := range flagtests { + t.Run(tt.in, func(t *testing.T) { + s := Sprintf(tt.in, &flagprinter) + if s != tt.out { + t.Errorf("got %q, want %q", s, tt.out) + } + }) + } +} +``` + +It is worth noting that most mainstream IDEs have already integrated [gotests](https://github.com/cweill/gotests), enabling the automatic generation of table-driven style Go unit tests, which I believe can enhance the efficiency of writing your unit tests: + +- [GoLand](https://blog.jetbrains.com/go/2020/03/13/test-driven-development-with-goland/) +- [Visual Studio Code](https://juandes.com/go-test-vsc/) + +### File Naming + +- **Specification Content**:Test files should end with `_test.go` to distinguish between test code and production code. +- **Positive Example**:`xxx_test.go` +- **Negative Example**:`testFile.go`、`test_xxx.go` + +### Test Function Naming + +- **Specification**: The name of the test function should start with `Test`, followed by the name of the function being tested, using camel case notation. +- **Positive Example**: + ```go + func TestAdd(t *testing.T) { + // Test logic ... + } + ``` +- **Negative Example**: + ```go + func TestAddWrong(t *testing.T) { + // Test logic ... + } + ``` + +### Test Function Signature + +- **Specification Content**: The signature of the test function should be `func TestXxx(t *testing.T)`, where `t` is the test object, of type `*testing.T`, and there should be no other parameters or return values. +- **Positive Example**: + ```go + func TestSubtraction(t *testing.T) { + // Test logic ... + } + ``` +- **Negative Example**: + ```go + func TestSubtraction(value int) { + // Test logic ... + } + ``` + +### Test Organization + +- **Specification Content**:Test cases should be independent of each other to avoid mutual influence between tests; use sub-tests (`t.Run`) to organize complex test scenarios. +- **Positive Example**: + ```go + func TestMathOperations(t *testing.T) { + t.Run("Addition", func(t *testing.T) { + // Test addition logic ... + }) + t.Run("Subtraction", func(t *testing.T) { + // Test subtraction logic ... + }) + } + ``` +- **Negative Example**: + ```go + func TestMathOperations(t *testing.T) { + // Mixed addition and subtraction logic... + } + ``` + +### Test Coverage + +- **Specification Content**:Attention should be paid to test coverage, use the `go test -cover` command to examine the test coverage of the code. +- **Positive Example**: + + ```shell + $ go test -cover + ``` +- **Negative Example**: + + ```shell + $ go test # Without checking test coverage + ``` +- **Note**: Karpor has wrapped this command as `make cover`, which will output the coverage for each package and total coverage in the command line. If you would like to view the coverage report in the browser, please execute `make cover-html`. + +### Benchmark Tests + +- **Specification Content**:Benchmark test functions should start with `Benchmark` and accept an argument of type `*testing.B`, focusing on performance testing. 
+- **Positive Example**:
+  ```go
+  func BenchmarkAdd(b *testing.B) {
+      for i := 0; i < b.N; i++ {
+          add(1, 1)
+      }
+  }
+  ```
+- **Negative Example**:
+  ```go
+  // Ignores b.N, so the benchmark framework cannot control the number of iterations
+  func BenchmarkAddWrong(b *testing.B) {
+      for i := 0; i < 1000; i++ {
+          add(1, 1)
+      }
+  }
+  ```
+
+### Concurrency Testing
+
+- **Specification Content**: For concurrent code, appropriate test cases should be written to ensure the correctness of the concurrency logic.
+- **Positive Example**:
+  ```go
+  func TestConcurrentAccess(t *testing.T) {
+      // Set up concurrent environment ...
+      // Test logic for concurrent access ...
+  }
+  ```
+- **Negative Example**:
+  ```go
+  func TestConcurrentAccess(t *testing.T) {
+      // Only test single-thread logic...
+  }
+  ```
+
+### Test Helper Functions
+
+- **Specification Content**: Helper functions can be defined within the test files to help set up the test environment or clean up resources.
+- **Positive Example**:
+  ```go
+  func setupTest(t *testing.T) {
+      // Set up test environment ...
+  }
+
+  func tearDownTest(t *testing.T) {
+      // Clean up resources ...
+  }
+
+  func TestMyFunction(t *testing.T) {
+      t.Run("TestSetup", func(t *testing.T) {
+          setupTest(t)
+          defer tearDownTest(t)
+          // Test logic ...
+      })
+  }
+  ```
+- **Negative Example**:
+  ```go
+  // Directly setting up and cleaning up resources in the test
+  func TestMyFunction(t *testing.T) {
+      // Set up test environment...
+      // Test logic...
+      // Clean up resources...
+  }
+  ```
+
+### Avoid Using Global Variables
+
+- **Specification Content**: Try to avoid using global variables in tests to ensure test independence.
+- **Positive Example**: Declare and use the necessary variables inside the test function.
+- **Negative Example**: Declare global variables at the top of the test file.
+
+### Clear Error Messages
+
+- **Specification Content**: When a test fails, output clear and understandable error messages to help developers locate the problem.
+- **Positive Example**:
+  - `t.Errorf("Expected value %d, but got %d", expected, actual)`
+- **Negative Example**:
+  - `t.Errorf("Error occurred")`
+  - `fmt.Println("Error occurred")`
+  - `panic("Error occurred")`
diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/4-commit-conventions.md b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/4-commit-conventions.md
new file mode 100644
index 00000000..fd3980e3
--- /dev/null
+++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/4-commit-conventions.md
@@ -0,0 +1,71 @@
+---
+title: Commit Conventions
+---
+## Commit Message Structure
+
+Karpor adheres to [conventional-commits](https://www.conventionalcommits.org/en/v1.0.0/).
+
+Commit messages should be organized following this structure:
+
+```
+<type>[optional scope]: <subject>
+
+[optional body]
+```
+
+## Example
+
+Commit message with scope:
+
+```
+feat(lang): add polish language
+```
+
+Commit message without body:
+
+```
+docs: correct spelling of CHANGELOG
+```
+
+Commit message with multiple body paragraphs:
+
+```
+fix: correct minor typos in code
+
+see the issue for details
+
+on typos fixed.
+
+reviewed-by: Z
+refs #133
+```
+
+## `<type>` (Required)
+
+The required type helps better capture the area of the commit, based on the [Angular guidelines](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#-commit-message-guidelines).
+
+We use lowercase for `<type>` to avoid spending time on case-sensitive issues.
`<type>` can be one of the following:
+
+- **feat**: A new feature
+- **fix**: A bug fix
+- **docs**: Documentation only changes
+- **build**: Changes that affect the build system or external dependencies
+- **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc.)
+- **refactor**: A code change that neither fixes a bug nor adds a feature
+- **perf**: A code change that improves performance
+- **test**: Adding missing tests or correcting existing tests
+- **chore**: Changes to the build process or auxiliary tools and libraries such as documentation generation
+
+## `<scope>` (Optional)
+
+Scope is optional and can be appended to the type of the commit to provide additional contextual information; it is enclosed in parentheses. It can be anything specifying the place of the commit change. GitHub issue references are also valid scopes, e.g., fix(ui), feat(api), fix(#233), etc.
+
+When the change affects multiple scopes, `*` can be used.
+
+## `<subject>` (Required)
+
+The subject must come immediately after the type/scope prefix, followed by a colon and a space. It is a concise summary of the code changes, for example, "fix: array parsing issue when multiple spaces were contained in string", rather than "fix: bug".
+
+## `<body>` (Optional)
+
+A longer commit body can be provided after the brief subject, giving additional contextual information about the code change. The body must begin one blank line after the subject.
diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/_category_.json b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/_category_.json
new file mode 100644
index 00000000..3287fa06
--- /dev/null
+++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/2-conventions/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Conventions"
+}
diff --git a/karpor_versioned_docs/version-v0.5/4-developer-guide/_category_.json b/karpor_versioned_docs/version-v0.5/4-developer-guide/_category_.json
new file mode 100644
index 00000000..8de262b6
--- /dev/null
+++ b/karpor_versioned_docs/version-v0.5/4-developer-guide/_category_.json
@@ -0,0 +1,3 @@
+{
+  "label": "Developer Guide"
+}
diff --git a/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/1-karpor.md b/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/1-karpor.md
new file mode 100644
index 00000000..891809d7
--- /dev/null
+++ b/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/1-karpor.md
@@ -0,0 +1,230 @@
+---
+title: karpor
+---
+### Synopsis
+
+Launch an API server
+
+```
+karpor [flags]
+```
+
+### Options
+
+```
+ --admission-control-config-file string File with admission control configuration.
+ --advertise-address ip The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used.
+ --anonymous-auth Enables anonymous requests to the secure port of the API server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. (default true)
+ --api-audiences strings Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences.
If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. + --audit-log-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-log-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 1) + --audit-log-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. + --audit-log-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. + --audit-log-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. + --audit-log-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. + --audit-log-compress If set, the rotated log files will be compressed using gzip. + --audit-log-format string Format of saved audits. "legacy" indicates 1-line text format for each event. "json" indicates structured json format. Known formats are legacy,json. (default "json") + --audit-log-maxage int The maximum number of days to retain old audit log files based on the timestamp encoded in their filename. + --audit-log-maxbackup int The maximum number of old audit log files to retain. Setting a value of 0 will mean there's no restriction on the number of files. + --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. + --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "blocking") + --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-log-truncate-enabled Whether event and batch truncating is enabled. + --audit-log-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-log-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-log-version string API group and version used for serializing audit events written to log. (default "audit.k8s.io/v1") + --audit-policy-file string Path to the file that defines the audit policy configuration. + --audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) + --audit-webhook-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 400) + --audit-webhook-batch-max-wait duration The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. (default 30s) + --audit-webhook-batch-throttle-burst int Maximum number of requests sent at the same moment if ThrottleQPS was not utilized before. Only used in batch mode. (default 15) + --audit-webhook-batch-throttle-enable Whether batching throttling is enabled. Only used in batch mode. 
(default true) + --audit-webhook-batch-throttle-qps float32 Maximum average number of batches per second. Only used in batch mode. (default 10) + --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. + --audit-webhook-initial-backoff duration The amount of time to wait before retrying the first failed request. (default 10s) + --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking,blocking-strict. (default "batch") + --audit-webhook-truncate-enabled Whether event and batch truncating is enabled. + --audit-webhook-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-webhook-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, and if this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-webhook-version string API group and version used for serializing audit events written to webhook. (default "audit.k8s.io/v1") + --authorization-mode strings Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node. (default [RBAC]) + --authorization-policy-file string File with authorization policy in json line by line format, used with --authorization-mode=ABAC, on the secure port. + --authorization-webhook-cache-authorized-ttl duration The duration to cache 'authorized' responses from the webhook authorizer. (default 5m0s) + --authorization-webhook-cache-unauthorized-ttl duration The duration to cache 'unauthorized' responses from the webhook authorizer. (default 30s) + --authorization-webhook-config-file string File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port. + --authorization-webhook-version string The API version of the authorization.k8s.io SubjectAccessReview to send to and expect from the webhook. (default "v1beta1") + --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0) + --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates") + --client-ca-file string If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate. + --contention-profiling Enable lock contention profiling, if profiling is enabled + --cors-allowed-origins strings List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. 
(default [.*]) + --delete-collection-workers int Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup. (default 1) + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. (default [MutatingAdmissionWebhook,NamespaceLifecycle,ValidatingAdmissionWebhook,ValidatingAdmissionPolicy]) + --egress-selector-config-file string File with apiserver egress selector configuration. + --elastic-search-addresses strings The elastic search address + --elastic-search-password string The elastic search password + --elastic-search-username string The elastic search username + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, MutatingAdmissionWebhook, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook). Comma-delimited list of admission plugins: MutatingAdmissionWebhook, NamespaceLifecycle, ValidatingAdmissionPolicy, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-controller-manager. (default true) + --enable-priority-and-fairness If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness (default true) + --encryption-provider-config string The file containing configuration for encryption providers to be used for storing secrets in etcd + --encryption-provider-config-automatic-reload Determines if the file set by --encryption-provider-config should be automatically reloaded if the disk contents change. Setting this to true disables the ability to uniquely identify distinct KMS plugins via the API server healthz endpoints. + --etcd-cafile string SSL Certificate Authority file used to secure etcd communication. + --etcd-certfile string SSL certification file used to secure etcd communication. + --etcd-compaction-interval duration The interval of compaction requests. If 0, the compaction request from apiserver is disabled. (default 5m0s) + --etcd-count-metric-poll-period duration Frequency of polling etcd for number of resources per type. 0 disables the metric collection. (default 1m0s) + --etcd-db-metric-poll-interval duration The interval of requests to poll etcd and update metric. 0 disables the metric collection (default 30s) + --etcd-healthcheck-timeout duration The timeout to use when checking etcd health. (default 2s) + --etcd-keyfile string SSL key file used to secure etcd communication. + --etcd-prefix string The prefix to prepend to all resource paths in etcd. (default "/registry/karpor") + --etcd-readycheck-timeout duration The timeout to use when checking etcd readiness (default 2s) + --etcd-servers strings List of etcd servers to connect with (scheme://ip:port), comma separated. + --etcd-servers-overrides strings Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary. 
+ --external-hostname string The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). + --feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: + APIListChunking=true|false (BETA - default=true) + APIPriorityAndFairness=true|false (BETA - default=true) + APIResponseCompression=true|false (BETA - default=true) + APISelfSubjectReview=true|false (ALPHA - default=false) + APIServerIdentity=true|false (BETA - default=true) + APIServerTracing=true|false (ALPHA - default=false) + AggregatedDiscoveryEndpoint=true|false (ALPHA - default=false) + AllAlpha=true|false (ALPHA - default=false) + AllBeta=true|false (BETA - default=false) + AnyVolumeDataSource=true|false (BETA - default=true) + AppArmor=true|false (BETA - default=true) + CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + CPUManagerPolicyBetaOptions=true|false (BETA - default=true) + CPUManagerPolicyOptions=true|false (BETA - default=true) + CSIMigrationPortworx=true|false (BETA - default=false) + CSIMigrationRBD=true|false (ALPHA - default=false) + CSINodeExpandSecret=true|false (ALPHA - default=false) + CSIVolumeHealth=true|false (ALPHA - default=false) + ComponentSLIs=true|false (ALPHA - default=false) + ContainerCheckpoint=true|false (ALPHA - default=false) + CronJobTimeZone=true|false (BETA - default=true) + CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false) + CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false) + CustomResourceValidationExpressions=true|false (BETA - default=true) + DisableCloudProviders=true|false (ALPHA - default=false) + DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false) + DownwardAPIHugePages=true|false (BETA - default=true) + DynamicResourceAllocation=true|false (ALPHA - default=false) + EventedPLEG=true|false (ALPHA - default=false) + ExpandedDNSConfig=true|false (BETA - default=true) + ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false) + GRPCContainerProbe=true|false (BETA - default=true) + GracefulNodeShutdown=true|false (BETA - default=true) + GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true) + HPAContainerMetrics=true|false (ALPHA - default=false) + HPAScaleToZero=true|false (ALPHA - default=false) + HonorPVReclaimPolicy=true|false (ALPHA - default=false) + IPTablesOwnershipCleanup=true|false (ALPHA - default=false) + InTreePluginAWSUnregister=true|false (ALPHA - default=false) + InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false) + InTreePluginAzureFileUnregister=true|false (ALPHA - default=false) + InTreePluginGCEUnregister=true|false (ALPHA - default=false) + InTreePluginOpenStackUnregister=true|false (ALPHA - default=false) + InTreePluginPortworxUnregister=true|false (ALPHA - default=false) + InTreePluginRBDUnregister=true|false (ALPHA - default=false) + InTreePluginvSphereUnregister=true|false (ALPHA - default=false) + JobMutableNodeSchedulingDirectives=true|false (BETA - default=true) + JobPodFailurePolicy=true|false (BETA - default=true) + JobReadyPods=true|false (BETA - default=true) + KMSv2=true|false (ALPHA - default=false) + KubeletInUserNamespace=true|false (ALPHA - default=false) + KubeletPodResources=true|false (BETA - default=true) + KubeletPodResourcesGetAllocatable=true|false (BETA - default=true) + KubeletTracing=true|false (ALPHA - default=false) + LegacyServiceAccountTokenTracking=true|false (ALPHA - default=false) + 
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false) + LogarithmicScaleDown=true|false (BETA - default=true) + MatchLabelKeysInPodTopologySpread=true|false (ALPHA - default=false) + MaxUnavailableStatefulSet=true|false (ALPHA - default=false) + MemoryManager=true|false (BETA - default=true) + MemoryQoS=true|false (ALPHA - default=false) + MinDomainsInPodTopologySpread=true|false (BETA - default=false) + MinimizeIPTablesRestore=true|false (ALPHA - default=false) + MultiCIDRRangeAllocator=true|false (ALPHA - default=false) + NetworkPolicyStatus=true|false (ALPHA - default=false) + NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true) + NodeOutOfServiceVolumeDetach=true|false (BETA - default=true) + NodeSwap=true|false (ALPHA - default=false) + OpenAPIEnums=true|false (BETA - default=true) + OpenAPIV3=true|false (BETA - default=true) + PDBUnhealthyPodEvictionPolicy=true|false (ALPHA - default=false) + PodAndContainerStatsFromCRI=true|false (ALPHA - default=false) + PodDeletionCost=true|false (BETA - default=true) + PodDisruptionConditions=true|false (BETA - default=true) + PodHasNetworkCondition=true|false (ALPHA - default=false) + PodSchedulingReadiness=true|false (ALPHA - default=false) + ProbeTerminationGracePeriod=true|false (BETA - default=true) + ProcMountType=true|false (ALPHA - default=false) + ProxyTerminatingEndpoints=true|false (BETA - default=true) + QOSReserved=true|false (ALPHA - default=false) + ReadWriteOncePod=true|false (ALPHA - default=false) + RecoverVolumeExpansionFailure=true|false (ALPHA - default=false) + RemainingItemCount=true|false (BETA - default=true) + RetroactiveDefaultStorageClass=true|false (BETA - default=true) + RotateKubeletServerCertificate=true|false (BETA - default=true) + SELinuxMountReadWriteOncePod=true|false (ALPHA - default=false) + SeccompDefault=true|false (BETA - default=true) + ServerSideFieldValidation=true|false (BETA - default=true) + SizeMemoryBackedVolumes=true|false (BETA - default=true) + StatefulSetAutoDeletePVC=true|false (ALPHA - default=false) + StatefulSetStartOrdinal=true|false (ALPHA - default=false) + StorageVersionAPI=true|false (ALPHA - default=false) + StorageVersionHash=true|false (BETA - default=true) + TopologyAwareHints=true|false (BETA - default=true) + TopologyManager=true|false (BETA - default=true) + TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false) + TopologyManagerPolicyBetaOptions=true|false (BETA - default=false) + TopologyManagerPolicyOptions=true|false (ALPHA - default=false) + UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false) + ValidatingAdmissionPolicy=true|false (ALPHA - default=false) + VolumeCapacityPriority=true|false (ALPHA - default=false) + WinDSR=true|false (ALPHA - default=false) + WinOverlay=true|false (BETA - default=true) + WindowsHostNetwork=true|false (ALPHA - default=true) (default APIPriorityAndFairness=true) + --goaway-chance float To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. 
+ -h, --help help for karpor + --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default. (default 1000) + --lease-reuse-duration-seconds int The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer. (default 60) + --livez-grace-period duration This option represents the maximum amount of time it should take for apiserver to complete its startup sequence and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume that unfinished post-start hooks will complete successfully and therefore return true. + --max-mutating-requests-inflight int This and --max-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of mutating requests in flight, or a zero value disables the limit completely. (default 200) + --max-requests-inflight int This and --max-mutating-requests-inflight are summed to determine the server's total concurrency limit (which must be positive) if --enable-priority-and-fairness is true. Otherwise, this flag limits the maximum number of non-mutating requests in flight, or a zero value disables the limit completely. (default 400) + --min-request-timeout int An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load. (default 1800) + --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false] + --permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false] + --profiling Enable profiling via web interface host:port/debug/pprof/ (default true) + --read-only-mode turn on the read only mode + --request-timeout duration An optional field indicating the duration a handler must keep a request open before timing it out. This is the default request timeout for requests but may be overridden by flags such as --min-request-timeout for specific types of requests. (default 1m0s) + --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests. + --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. + --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. + --requestheader-username-headers strings List of request headers to inspect for usernames. 
X-Remote-User is common. + --search-storage-type string The search storage type + --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 443) + --shutdown-delay-duration duration Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez will return success, but /readyz immediately returns failure. Graceful termination starts after this delay has elapsed. This can be used to allow load balancer to stop sending traffic to this server. + --shutdown-send-retry-after If true the HTTP Server will continue listening until all non long running request(s) in flight have been drained, during this window all incoming requests will be rejected with a status code 429 and a 'Retry-After' response header, in addition 'Connection: close' response header is set in order to tear down the TCP connection when idle. + --storage-backend string The storage backend for persistence. Options: 'etcd3' (default). + --storage-media-type string The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting. Supported media types: [application/json, application/yaml, application/vnd.kubernetes.protobuf] (default "application/json") + --strict-transport-security-directives strings List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload' + --tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. (default "apiserver.local.config/certificates/apiserver.crt") + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used. + Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384. + Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA. + --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13 + --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. 
(default "apiserver.local.config/certificates/apiserver.key") + --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) + --tracing-config-file string File with apiserver tracing configuration. + --watch-cache Enable watch caching in the apiserver (default true) + --watch-cache-sizes strings Watch cache size settings for some resources (pods, nodes, etc.), comma separated. The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, and size is a number. This option is only meaningful for resources built into the apiserver, not ones defined by CRDs or aggregated from external servers, and is only consulted if the watch-cache is enabled. The only meaningful size setting to supply here is zero, which means to disable watch caching for the associated resource; all non-zero values are equivalent and mean to not disable watch caching for that resource +``` + +### SEE ALSO + +* [karpor syncer](2-karpor-syncer.md) - start a resource syncer to sync resource from clusters + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/2-karpor-syncer.md b/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/2-karpor-syncer.md new file mode 100644 index 00000000..d25245ae --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/2-karpor-syncer.md @@ -0,0 +1,25 @@ +--- +title: karpor syncer +--- +## karpor syncer + +start a resource syncer to sync resource from clusters + +``` +karpor syncer [flags] +``` + +### Options + +``` + --elastic-search-addresses strings The elastic search address. + --health-probe-bind-address string The address the probe endpoint binds to. (default ":8081") + -h, --help help for syncer + --metrics-bind-address string The address the metric endpoint binds to. 
(default ":8080") +``` + +### SEE ALSO + +* [karpor](1-karpor.md) - Launch an API server + +###### Auto generated by spf13/cobra on 7-May-2024 diff --git a/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/_category_.json b/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/_category_.json new file mode 100644 index 00000000..41757f5f --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/5-references/1-cli-commands/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "CLI Commands" +} diff --git a/karpor_versioned_docs/version-v0.5/5-references/2-openapi.md b/karpor_versioned_docs/version-v0.5/5-references/2-openapi.md new file mode 100644 index 00000000..81c0321d --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/5-references/2-openapi.md @@ -0,0 +1,1862 @@ +--- +title: OpenAPI +--- +## Informations + +### Version + +1.0 + +### Contact + +## Content negotiation + +### URI Schemes + +* http + +### Consumes + +* application/json +* multipart/form-data +* text/plain + +### Produces + +* application/json +* text/plain + +## All endpoints + +### cluster + +| Method | URI | Name | Summary | +| ------ | ------------------------------------ | ------------------------------------------------------------------------------------- | -------------------------------------------- | +| DELETE | /rest-api/v1/cluster/{clusterName} | [delete rest API v1 cluster cluster name](#delete-rest-api-v1-cluster-cluster-name) | Delete removes a cluster resource by name. | +| GET | /rest-api/v1/cluster/{clusterName} | [get rest API v1 cluster cluster name](#get-rest-api-v1-cluster-cluster-name) | Get returns a cluster resource by name. | +| GET | /rest-api/v1/clusters | [get rest API v1 clusters](#get-rest-api-v1-clusters) | List lists all cluster resources. | +| POST | /rest-api/v1/cluster/{clusterName} | [post rest API v1 cluster cluster name](#post-rest-api-v1-cluster-cluster-name) | Create creates a cluster resource. | +| POST | /rest-api/v1/cluster/config/file | [post rest API v1 cluster config file](#post-rest-api-v1-cluster-config-file) | Upload kubeConfig file for cluster | +| POST | /rest-api/v1/cluster/config/validate | [post rest API v1 cluster config validate](#post-rest-api-v1-cluster-config-validate) | Validate KubeConfig | +| PUT | /rest-api/v1/cluster/{clusterName} | [put rest API v1 cluster cluster name](#put-rest-api-v1-cluster-cluster-name) | Update updates the cluster metadata by name. | + +### debug + +| Method | URI | Name | Summary | +| ------ | ---------- | ------------------------------- | ---------------------------- | +| GET | /endpoints | [get endpoints](#get-endpoints) | List all available endpoints | + +### insight + +| Method | URI | Name | Summary | +| ------ | ----------------------------- | --------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/insight/audit | [get rest API v1 insight audit](#get-rest-api-v1-insight-audit) | Audit based on resource group. | +| GET | /rest-api/v1/insight/detail | [get rest API v1 insight detail](#get-rest-api-v1-insight-detail) | GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/events | [get rest API v1 insight events](#get-rest-api-v1-insight-events) | GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. 
| +| GET | /rest-api/v1/insight/score | [get rest API v1 insight score](#get-rest-api-v1-insight-score) | ScoreHandler calculates a score for the audited manifest. | +| GET | /rest-api/v1/insight/stats | [get rest API v1 insight stats](#get-rest-api-v1-insight-stats) | Get returns a global statistics info. | +| GET | /rest-api/v1/insight/summary | [get rest API v1 insight summary](#get-rest-api-v1-insight-summary) | Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. | +| GET | /rest-api/v1/insight/topology | [get rest API v1 insight topology](#get-rest-api-v1-insight-topology) | GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. | + +### resourcegroup + +| Method | URI | Name | Summary | +| ------ | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | +| GET | /rest-api/v1/resource-groups/{resourceGroupRuleName} | [get rest API v1 resource groups resource group rule name](#get-rest-api-v1-resource-groups-resource-group-rule-name) | List lists all ResourceGroups by rule name. | + +### resourcegrouprule + +| Method | URI | Name | Summary | +| ------ | -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | +| DELETE | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [delete rest API v1 resource group rule resource group rule name](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name) | Delete removes a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rule/{resourceGroupRuleName} | [get rest API v1 resource group rule resource group rule name](#get-rest-api-v1-resource-group-rule-resource-group-rule-name) | Get returns a ResourceGroupRule by name. | +| GET | /rest-api/v1/resource-group-rules | [get rest API v1 resource group rules](#get-rest-api-v1-resource-group-rules) | List lists all ResourceGroupRules. | +| POST | /rest-api/v1/resource-group-rule | [post rest API v1 resource group rule](#post-rest-api-v1-resource-group-rule) | Create creates a ResourceGroupRule. | +| PUT | /rest-api/v1/resource-group-rule | [put rest API v1 resource group rule](#put-rest-api-v1-resource-group-rule) | Update updates the ResourceGroupRule metadata by name. | + +### search + +| Method | URI | Name | Summary | +| ------ | ------------------- | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | +| GET | /rest-api/v1/search | [get rest API v1 search](#get-rest-api-v1-search) | SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. | + +## Paths + +### Delete removes a cluster resource by name. (*DeleteRestAPIV1ClusterClusterName*) + +``` +DELETE /rest-api/v1/cluster/{clusterName} +``` + +This endpoint deletes the cluster resource by name. 
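+
+For orientation, here is a minimal Go sketch of a request to this endpoint; the base URL and cluster name below are placeholders, and any authentication headers required by a given deployment are omitted:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+    "net/http"
+)
+
+func main() {
+    // Placeholder address of the Karpor API server; adjust for your deployment.
+    baseURL := "https://karpor-server.example.com"
+
+    // DELETE /rest-api/v1/cluster/{clusterName} with a sample cluster name.
+    req, err := http.NewRequest(http.MethodDelete, baseURL+"/rest-api/v1/cluster/example-cluster", nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    resp, err := http.DefaultClient.Do(req)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println("status:", resp.Status)
+}
+```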
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------- | +| [200](#delete-rest-api-v1-cluster-cluster-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#delete-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#delete-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#delete-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#delete-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#delete-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#delete-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Delete removes a ResourceGroupRule by name. (*DeleteRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +DELETE /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint deletes the ResourceGroupRule by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| --------------------------------------------------------------------------- | --------------------- | --------------------- | :---------: | ------------------------------------------------------------------------------------- | +| [200](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Operation status | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#delete-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Operation status + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List all available endpoints (*GetEndpoints*) + +``` +GET /endpoints +``` + +List all registered endpoints in the router + +#### Consumes + +* text/plain + +#### Produces + +* text/plain + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------- | ------ | ----------------------------- | :---------: | ----------------------------------- | +| [200](#get-endpoints-200) | OK | Endpoints listed successfully | | [schema](#get-endpoints-200-schema) | + +#### Responses + +##### 200 - Endpoints listed successfully + +Status: OK + +###### Schema + +### Get returns a cluster resource by name. (*GetRestAPIV1ClusterClusterName*) + +``` +GET /rest-api/v1/cluster/{clusterName} +``` + +This endpoint returns a cluster resource by name. 
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------- | ------ | -------- | --------- | :------: | ------- | -------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#get-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#get-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#get-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#get-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#get-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#get-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all cluster resources. (*GetRestAPIV1Clusters*) + +``` +GET /rest-api/v1/clusters +``` + +This endpoint lists all cluster resources. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the cluster. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------ | --------------------- | ----------------------- | :---------: | ---------------------------------------------- | +| [200](#get-rest-api-v1-clusters-200) | OK | List of cluster objects | | [schema](#get-rest-api-v1-clusters-200-schema) | +| [400](#get-rest-api-v1-clusters-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-clusters-400-schema) | +| [401](#get-rest-api-v1-clusters-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-clusters-401-schema) | +| [404](#get-rest-api-v1-clusters-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-clusters-404-schema) | +| [405](#get-rest-api-v1-clusters-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-clusters-405-schema) | +| [429](#get-rest-api-v1-clusters-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-clusters-429-schema) | +| [500](#get-rest-api-v1-clusters-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-clusters-500-schema) | + +#### Responses + +##### 200 - List of cluster objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Audit based on resource group. (*GetRestAPIV1InsightAudit*) + +``` +GET /rest-api/v1/insight/audit +``` + +This endpoint audits based on the specified resource group. 
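+
+For orientation, here is a minimal Go sketch of a request to this endpoint using the query parameters documented below; the base URL and parameter values are placeholders, and any authentication headers required by a given deployment are omitted:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+    "net/http"
+    "net/url"
+)
+
+func main() {
+    // Placeholder address of the Karpor API server; adjust for your deployment.
+    baseURL := "https://karpor-server.example.com"
+
+    // Audit a single Deployment identified by cluster, apiVersion, kind, namespace and name.
+    q := url.Values{}
+    q.Set("cluster", "example-cluster")
+    q.Set("apiVersion", "apps/v1")
+    q.Set("kind", "Deployment")
+    q.Set("namespace", "default")
+    q.Set("name", "foo")
+
+    resp, err := http.Get(baseURL + "/rest-api/v1/insight/audit?" + q.Encode())
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println("status:", resp.Status)
+}
+```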
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced scanning, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-audit-200) | OK | Audit results | | [schema](#get-rest-api-v1-insight-audit-200-schema) | +| [400](#get-rest-api-v1-insight-audit-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-audit-400-schema) | +| [401](#get-rest-api-v1-insight-audit-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-audit-401-schema) | +| [404](#get-rest-api-v1-insight-audit-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-audit-404-schema) | +| [429](#get-rest-api-v1-insight-audit-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-audit-429-schema) | +| [500](#get-rest-api-v1-insight-audit-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-audit-500-schema) | + +#### Responses + +##### 200 - Audit results + +Status: OK + +###### Schema + +[ScannerAuditData](#scanner-audit-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetDetail returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightDetail*) + +``` +GET /rest-api/v1/insight/detail +``` + +This endpoint returns a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| format | `query` | string | `string` | | | | The format of the response. Either in json or yaml. 
Default to json | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-detail-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-insight-detail-200-schema) | +| [400](#get-rest-api-v1-insight-detail-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-detail-400-schema) | +| [401](#get-rest-api-v1-insight-detail-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-detail-401-schema) | +| [404](#get-rest-api-v1-insight-detail-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-detail-404-schema) | +| [405](#get-rest-api-v1-insight-detail-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-detail-405-schema) | +| [429](#get-rest-api-v1-insight-detail-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-detail-429-schema) | +| [500](#get-rest-api-v1-insight-detail-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-detail-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetEvents returns events for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightEvents*) + +``` +GET /rest-api/v1/insight/events +``` + +This endpoint returns events for a Kubernetes resource YAML by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------- | +| [200](#get-rest-api-v1-insight-events-200) | OK | List of events | | [schema](#get-rest-api-v1-insight-events-200-schema) | +| [400](#get-rest-api-v1-insight-events-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-events-400-schema) | +| [401](#get-rest-api-v1-insight-events-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-events-401-schema) | +| [404](#get-rest-api-v1-insight-events-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-events-404-schema) | +| [405](#get-rest-api-v1-insight-events-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-events-405-schema) | +| [429](#get-rest-api-v1-insight-events-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-events-429-schema) | +| [500](#get-rest-api-v1-insight-events-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-events-500-schema) | + +#### Responses + +##### 200 - List of events + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### ScoreHandler calculates a score for the audited manifest. (*GetRestAPIV1InsightScore*) + +``` +GET /rest-api/v1/insight/score +``` + +This endpoint calculates a score for the provided manifest based on the number and severity of issues detected during the audit. 
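+
+For illustration only (this example is not part of the generated specification), a request to this endpoint might look like the following. `$KARPOR_HOST` is a placeholder for wherever your Karpor server is exposed, the query values are the sample values from the parameter table below, and any authentication headers your deployment requires would need to be added:
+
+```bash
+# curl -G turns --data-urlencode pairs into a percent-encoded query string (e.g. apps/v1).
+curl -G "$KARPOR_HOST/rest-api/v1/insight/score" \
+  --data-urlencode "cluster=example-cluster" \
+  --data-urlencode "apiVersion=apps/v1" \
+  --data-urlencode "kind=Deployment" \
+  --data-urlencode "namespace=default" \
+  --data-urlencode "name=foo"
+```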
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1' | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Switch for forced compute score, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ------------------------ | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-score-200) | OK | Score calculation result | | [schema](#get-rest-api-v1-insight-score-200-schema) | +| [400](#get-rest-api-v1-insight-score-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-score-400-schema) | +| [401](#get-rest-api-v1-insight-score-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-score-401-schema) | +| [404](#get-rest-api-v1-insight-score-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-score-404-schema) | +| [429](#get-rest-api-v1-insight-score-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-score-429-schema) | +| [500](#get-rest-api-v1-insight-score-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-score-500-schema) | + +#### Responses + +##### 200 - Score calculation result + +Status: OK + +###### Schema + +[InsightScoreData](#insight-score-data) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a global statistics info. (*GetRestAPIV1InsightStats*) + +``` +GET /rest-api/v1/insight/stats +``` + +This endpoint returns a global statistics info. 
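+
+A minimal sketch of calling this endpoint, assuming the same `$KARPOR_HOST` placeholder for the server address:
+
+```bash
+curl "$KARPOR_HOST/rest-api/v1/insight/stats"
+```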
+ +#### Produces + +* application/json + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------- | --------------------- | ---------------------- | :---------: | --------------------------------------------------- | +| [200](#get-rest-api-v1-insight-stats-200) | OK | Global statistics info | | [schema](#get-rest-api-v1-insight-stats-200-schema) | +| [400](#get-rest-api-v1-insight-stats-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-stats-400-schema) | +| [401](#get-rest-api-v1-insight-stats-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-stats-401-schema) | +| [404](#get-rest-api-v1-insight-stats-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-stats-404-schema) | +| [405](#get-rest-api-v1-insight-stats-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-stats-405-schema) | +| [429](#get-rest-api-v1-insight-stats-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-stats-429-schema) | +| [500](#get-rest-api-v1-insight-stats-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-stats-500-schema) | + +#### Responses + +##### 200 - Global statistics info + +Status: OK + +###### Schema + +[InsightStatistics](#insight-statistics) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightSummary*) + +``` +GET /rest-api/v1/insight/summary +``` + +This endpoint returns a Kubernetes resource summary by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------ | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------- | +| [200](#get-rest-api-v1-insight-summary-200) | OK | Resource Summary | | [schema](#get-rest-api-v1-insight-summary-200-schema) | +| [400](#get-rest-api-v1-insight-summary-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-summary-400-schema) | +| [401](#get-rest-api-v1-insight-summary-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-summary-401-schema) | +| [404](#get-rest-api-v1-insight-summary-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-summary-404-schema) | +| [405](#get-rest-api-v1-insight-summary-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-summary-405-schema) | +| [429](#get-rest-api-v1-insight-summary-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-summary-429-schema) | +| [500](#get-rest-api-v1-insight-summary-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-summary-500-schema) | + +#### Responses + +##### 200 - Resource Summary + +Status: OK + +###### Schema + +[InsightResourceSummary](#insight-resource-summary) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### GetTopology returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. (*GetRestAPIV1InsightTopology*) + +``` +GET /rest-api/v1/insight/topology +``` + +This endpoint returns a topology map for a Kubernetes resource by name, namespace, cluster, apiVersion and kind. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ---------------------------------------------------------------------- | +| apiVersion | `query` | string | `string` | | | | The specified apiVersion, such as 'apps/v1'. 
Should be percent-encoded | +| cluster | `query` | string | `string` | | | | The specified cluster name, such as 'example-cluster' | +| forceNew | `query` | boolean | `bool` | | | | Force re-generating the topology, default is 'false' | +| kind | `query` | string | `string` | | | | The specified kind, such as 'Deployment' | +| name | `query` | string | `string` | | | | The specified resource name, such as 'foo' | +| namespace | `query` | string | `string` | | | | The specified namespace, such as 'default' | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------- | --------------------- | -------------------------------------------- | :---------: | ------------------------------------------------------ | +| [200](#get-rest-api-v1-insight-topology-200) | OK | map from string to resource.ResourceTopology | | [schema](#get-rest-api-v1-insight-topology-200-schema) | +| [400](#get-rest-api-v1-insight-topology-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-insight-topology-400-schema) | +| [401](#get-rest-api-v1-insight-topology-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-insight-topology-401-schema) | +| [404](#get-rest-api-v1-insight-topology-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-insight-topology-404-schema) | +| [405](#get-rest-api-v1-insight-topology-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-insight-topology-405-schema) | +| [429](#get-rest-api-v1-insight-topology-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-insight-topology-429-schema) | +| [500](#get-rest-api-v1-insight-topology-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-insight-topology-500-schema) | + +#### Responses + +##### 200 - map from string to resource.ResourceTopology + +Status: OK + +###### Schema + +map of [InsightResourceTopology](#insight-resource-topology) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Get returns a ResourceGroupRule by name. (*GetRestAPIV1ResourceGroupRuleResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-group-rule/{resourceGroupRuleName} +``` + +This endpoint returns a ResourceGroupRule by name. 
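+
+For example, fetching a rule by name (the host and the rule name `example-rule` are hypothetical placeholders):
+
+```bash
+curl "$KARPOR_HOST/rest-api/v1/resource-group-rule/example-rule"
+```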
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200) | OK | Unstructured object | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rule-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroupRules. (*GetRestAPIV1ResourceGroupRules*) + +``` +GET /rest-api/v1/resource-group-rules +``` + +This endpoint lists all ResourceGroupRules. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ---------- | ------- | ------- | -------- | --------- | :------: | ------- | ----------------------------------------------------------------- | +| descending | `query` | boolean | `bool` | | | | Whether to sort the list in descending order. Default to false | +| orderBy | `query` | string | `string` | | | | The order to list the resourceGroupRule. Default to order by name | +| summary | `query` | boolean | `bool` | | | | Whether to display summary or not. 
Default to false | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#get-rest-api-v1-resource-group-rules-200) | OK | List of resourceGroupRule objects | | [schema](#get-rest-api-v1-resource-group-rules-200-schema) | +| [400](#get-rest-api-v1-resource-group-rules-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-group-rules-400-schema) | +| [401](#get-rest-api-v1-resource-group-rules-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-group-rules-401-schema) | +| [404](#get-rest-api-v1-resource-group-rules-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-group-rules-404-schema) | +| [405](#get-rest-api-v1-resource-group-rules-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-group-rules-405-schema) | +| [429](#get-rest-api-v1-resource-group-rules-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-group-rules-429-schema) | +| [500](#get-rest-api-v1-resource-group-rules-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-group-rules-500-schema) | + +#### Responses + +##### 200 - List of resourceGroupRule objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### List lists all ResourceGroups by rule name. (*GetRestAPIV1ResourceGroupsResourceGroupRuleName*) + +``` +GET /rest-api/v1/resource-groups/{resourceGroupRuleName} +``` + +This endpoint lists all ResourceGroups. 
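+
+A sketch of listing the resource groups produced by a rule (hypothetical host and rule name):
+
+```bash
+curl "$KARPOR_HOST/rest-api/v1/resource-groups/example-rule"
+```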
+ +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| --------------------- | ------ | ------ | -------- | --------- | :------: | ------- | ----------------------------------- | +| resourceGroupRuleName | `path` | string | `string` | | ✓ | | The name of the resource group rule | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| -------------------------------------------------------------------- | --------------------- | ----------------------------- | :---------: | ------------------------------------------------------------------------------ | +| [200](#get-rest-api-v1-resource-groups-resource-group-rule-name-200) | OK | List of resourceGroup objects | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-200-schema) | +| [400](#get-rest-api-v1-resource-groups-resource-group-rule-name-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-400-schema) | +| [401](#get-rest-api-v1-resource-groups-resource-group-rule-name-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-401-schema) | +| [404](#get-rest-api-v1-resource-groups-resource-group-rule-name-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-404-schema) | +| [405](#get-rest-api-v1-resource-groups-resource-group-rule-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-405-schema) | +| [429](#get-rest-api-v1-resource-groups-resource-group-rule-name-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-429-schema) | +| [500](#get-rest-api-v1-resource-groups-resource-group-rule-name-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-resource-groups-resource-group-rule-name-500-schema) | + +#### Responses + +##### 200 - List of resourceGroup objects + +Status: OK + +###### Schema + +[][UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### SearchForResource returns an array of Kubernetes runtime Object matched using the query from context. (*GetRestAPIV1Search*) + +``` +GET /rest-api/v1/search +``` + +This endpoint returns an array of Kubernetes runtime Object matched using the query from context. + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| -------- | ------- | ------ | -------- | --------- | :------: | ------- | ------------------------------------------------------ | +| page | `query` | string | `string` | | | | The current page to fetch. Default to 1 | +| pageSize | `query` | string | `string` | | | | The size of the page. Default to 10 | +| pattern | `query` | string | `string` | | ✓ | | The search pattern. Can be either sql or dsl. Required | +| query | `query` | string | `string` | | ✓ | | The query to use for search. 
Required | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------- | --------------------- | ----------------------- | :---------: | -------------------------------------------- | +| [200](#get-rest-api-v1-search-200) | OK | Array of runtime.Object | | [schema](#get-rest-api-v1-search-200-schema) | +| [400](#get-rest-api-v1-search-400) | Bad Request | Bad Request | | [schema](#get-rest-api-v1-search-400-schema) | +| [401](#get-rest-api-v1-search-401) | Unauthorized | Unauthorized | | [schema](#get-rest-api-v1-search-401-schema) | +| [404](#get-rest-api-v1-search-404) | Not Found | Not Found | | [schema](#get-rest-api-v1-search-404-schema) | +| [405](#get-rest-api-v1-search-405) | Method Not Allowed | Method Not Allowed | | [schema](#get-rest-api-v1-search-405-schema) | +| [429](#get-rest-api-v1-search-429) | Too Many Requests | Too Many Requests | | [schema](#get-rest-api-v1-search-429-schema) | +| [500](#get-rest-api-v1-search-500) | Internal Server Error | Internal Server Error | | [schema](#get-rest-api-v1-search-500-schema) | + +#### Responses + +##### 200 - Array of runtime.Object + +Status: OK + +###### Schema + +[][interface{}](#interface) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a cluster resource. (*PostRestAPIV1ClusterClusterName*) + +``` +POST /rest-api/v1/cluster/{clusterName} +``` + +This endpoint creates a new cluster resource using the payload. 
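+
+A sketch of creating a cluster with a JSON payload; the fields follow the `cluster.ClusterPayload` model described in the Models section, while the host, cluster name and field values are placeholders:
+
+```bash
+curl -X POST "$KARPOR_HOST/rest-api/v1/cluster/example-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "displayName": "Example Cluster",
+    "description": "demo cluster",
+    "kubeconfig": "<kubeconfig content as a string>"
+  }'
+```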
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------- | --------------------- | --------------------- | :---------: | ----------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#post-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#post-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#post-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#post-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#post-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#post-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Upload kubeConfig file for cluster (*PostRestAPIV1ClusterConfigFile*) + +``` +POST /rest-api/v1/cluster/config/file +``` + +Uploads a KubeConfig file for cluster, with a maximum size of 2MB. 
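+
+A minimal multipart upload sketch (placeholder host and values; the uploaded file must stay under the 2MB limit mentioned above):
+
+```bash
+curl -X POST "$KARPOR_HOST/rest-api/v1/cluster/config/file" \
+  -F "name=example-cluster" \
+  -F "displayName=Example Cluster" \
+  -F "description=demo cluster" \
+  -F "file=@$HOME/.kube/config"
+```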
+ +#### Consumes + +* multipart/form-data + +#### Produces + +* text/plain + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ---------- | ------ | --------------- | --------- | :------: | ------- | ---------------------------------- | +| description | `formData` | string | `string` | | ✓ | | cluster description | +| displayName | `formData` | string | `string` | | ✓ | | cluster display name | +| file | `formData` | file | `io.ReadCloser` | | ✓ | | Upload file with field name 'file' | +| name | `formData` | string | `string` | | ✓ | | cluster name | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------------------------------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-file-200) | OK | Returns the content of the uploaded KubeConfig file. | | [schema](#post-rest-api-v1-cluster-config-file-200-schema) | +| [400](#post-rest-api-v1-cluster-config-file-400) | Bad Request | The uploaded file is too large or the request is invalid. | | [schema](#post-rest-api-v1-cluster-config-file-400-schema) | +| [500](#post-rest-api-v1-cluster-config-file-500) | Internal Server Error | Internal server error. | | [schema](#post-rest-api-v1-cluster-config-file-500-schema) | + +#### Responses + +##### 200 - Returns the content of the uploaded KubeConfig file. + +Status: OK + +###### Schema + +[ClusterUploadData](#cluster-upload-data) + +##### 400 - The uploaded file is too large or the request is invalid. + +Status: Bad Request + +###### Schema + +##### 500 - Internal server error. + +Status: Internal Server Error + +###### Schema + +### Validate KubeConfig (*PostRestAPIV1ClusterConfigValidate*) + +``` +POST /rest-api/v1/cluster/config/validate +``` + +Validates the provided KubeConfig using cluster manager methods. 
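+
+A sketch of validating a kubeconfig before registering a cluster; the host is a placeholder and `jq` is used here only to JSON-escape the file content for the `kubeConfig` field of the `cluster.ValidatePayload` model:
+
+```bash
+# jq -Rs turns the raw kubeconfig file into a single JSON-encoded string.
+curl -X POST "$KARPOR_HOST/rest-api/v1/cluster/config/validate" \
+  -H "Content-Type: application/json" \
+  -d "{\"kubeConfig\": $(jq -Rs . < "$HOME/.kube/config")}"
+```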
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | --------------------------------------------------- | ------------------------------- | --------- | :------: | ------- | ------------------------------ | +| request | `body` | [ClusterValidatePayload](#cluster-validate-payload) | `models.ClusterValidatePayload` | | ✓ | | KubeConfig payload to validate | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ---------------------------------------------------- | --------------------- | ---------------------------------- | :---------: | -------------------------------------------------------------- | +| [200](#post-rest-api-v1-cluster-config-validate-200) | OK | Verification passed server version | | [schema](#post-rest-api-v1-cluster-config-validate-200-schema) | +| [400](#post-rest-api-v1-cluster-config-validate-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-cluster-config-validate-400-schema) | +| [401](#post-rest-api-v1-cluster-config-validate-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-cluster-config-validate-401-schema) | +| [404](#post-rest-api-v1-cluster-config-validate-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-cluster-config-validate-404-schema) | +| [429](#post-rest-api-v1-cluster-config-validate-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-cluster-config-validate-429-schema) | +| [500](#post-rest-api-v1-cluster-config-validate-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-cluster-config-validate-500-schema) | + +#### Responses + +##### 200 - Verification passed server version + +Status: OK + +###### Schema + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Create creates a ResourceGroupRule. (*PostRestAPIV1ResourceGroupRule*) + +``` +POST /rest-api/v1/resource-group-rule +``` + +This endpoint creates a new ResourceGroupRule using the payload. 
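+
+A sketch of creating a rule; the payload shape follows the `resourcegrouprule.ResourceGroupRulePayload` model, and the host, rule name and `fields` values are only illustrative:
+
+```bash
+curl -X POST "$KARPOR_HOST/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "name": "example-rule",
+    "description": "group resources by namespace",
+    "fields": ["namespace"]
+  }'
+```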
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to create (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#post-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#post-rest-api-v1-resource-group-rule-200-schema) | +| [400](#post-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#post-rest-api-v1-resource-group-rule-400-schema) | +| [401](#post-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#post-rest-api-v1-resource-group-rule-401-schema) | +| [404](#post-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#post-rest-api-v1-resource-group-rule-404-schema) | +| [405](#post-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#post-rest-api-v1-resource-group-rule-405-schema) | +| [429](#post-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#post-rest-api-v1-resource-group-rule-429-schema) | +| [500](#post-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#post-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the cluster metadata by name. (*PutRestAPIV1ClusterClusterName*) + +``` +PUT /rest-api/v1/cluster/{clusterName} +``` + +This endpoint updates the display name and description of an existing cluster resource. 
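+
+A sketch of updating the metadata of an existing cluster (placeholder host, cluster name and values):
+
+```bash
+curl -X PUT "$KARPOR_HOST/rest-api/v1/cluster/example-cluster" \
+  -H "Content-Type: application/json" \
+  -d '{"displayName": "Example Cluster", "description": "updated description"}'
+```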
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ----------- | ------ | ------------------------------------------------- | ------------------------------ | --------- | :------: | ------- | ---------------------------------------------------- | +| clusterName | `path` | string | `string` | | ✓ | | The name of the cluster | +| request | `body` | [ClusterClusterPayload](#cluster-cluster-payload) | `models.ClusterClusterPayload` | | ✓ | | cluster to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ------------------------------------------------ | --------------------- | --------------------- | :---------: | ---------------------------------------------------------- | +| [200](#put-rest-api-v1-cluster-cluster-name-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-cluster-cluster-name-200-schema) | +| [400](#put-rest-api-v1-cluster-cluster-name-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-cluster-cluster-name-400-schema) | +| [401](#put-rest-api-v1-cluster-cluster-name-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-cluster-cluster-name-401-schema) | +| [404](#put-rest-api-v1-cluster-cluster-name-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-cluster-cluster-name-404-schema) | +| [405](#put-rest-api-v1-cluster-cluster-name-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-cluster-cluster-name-405-schema) | +| [429](#put-rest-api-v1-cluster-cluster-name-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-cluster-cluster-name-429-schema) | +| [500](#put-rest-api-v1-cluster-cluster-name-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-cluster-cluster-name-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +### Update updates the ResourceGroupRule metadata by name. (*PutRestAPIV1ResourceGroupRule*) + +``` +PUT /rest-api/v1/resource-group-rule +``` + +This endpoint updates the display name and description of an existing ResourceGroupRule. 
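+
+A sketch of updating an existing rule (placeholder host and values; the payload shape again follows `resourcegrouprule.ResourceGroupRulePayload`):
+
+```bash
+curl -X PUT "$KARPOR_HOST/rest-api/v1/resource-group-rule" \
+  -H "Content-Type: application/json" \
+  -d '{"name": "example-rule", "description": "updated description"}'
+```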
+ +#### Consumes + +* application/json +* text/plain + +#### Produces + +* application/json + +#### Parameters + +| Name | Source | Type | Go type | Separator | Required | Default | Description | +| ------- | ------ | ------------------------------------------------------------------------------------------- | -------------------------------------------------- | --------- | :------: | ------- | -------------------------------------------------------------- | +| request | `body` | [ResourcegroupruleResourceGroupRulePayload](#resourcegrouprule-resource-group-rule-payload) | `models.ResourcegroupruleResourceGroupRulePayload` | | ✓ | | resourceGroupRule to update (either plain text or JSON format) | + +#### All responses + +| Code | Status | Description | Has headers | Schema | +| ----------------------------------------------- | --------------------- | --------------------- | :---------: | --------------------------------------------------------- | +| [200](#put-rest-api-v1-resource-group-rule-200) | OK | Unstructured object | | [schema](#put-rest-api-v1-resource-group-rule-200-schema) | +| [400](#put-rest-api-v1-resource-group-rule-400) | Bad Request | Bad Request | | [schema](#put-rest-api-v1-resource-group-rule-400-schema) | +| [401](#put-rest-api-v1-resource-group-rule-401) | Unauthorized | Unauthorized | | [schema](#put-rest-api-v1-resource-group-rule-401-schema) | +| [404](#put-rest-api-v1-resource-group-rule-404) | Not Found | Not Found | | [schema](#put-rest-api-v1-resource-group-rule-404-schema) | +| [405](#put-rest-api-v1-resource-group-rule-405) | Method Not Allowed | Method Not Allowed | | [schema](#put-rest-api-v1-resource-group-rule-405-schema) | +| [429](#put-rest-api-v1-resource-group-rule-429) | Too Many Requests | Too Many Requests | | [schema](#put-rest-api-v1-resource-group-rule-429-schema) | +| [500](#put-rest-api-v1-resource-group-rule-500) | Internal Server Error | Internal Server Error | | [schema](#put-rest-api-v1-resource-group-rule-500-schema) | + +#### Responses + +##### 200 - Unstructured object + +Status: OK + +###### Schema + +[UnstructuredUnstructured](#unstructured-unstructured) + +##### 400 - Bad Request + +Status: Bad Request + +###### Schema + +##### 401 - Unauthorized + +Status: Unauthorized + +###### Schema + +##### 404 - Not Found + +Status: Not Found + +###### Schema + +##### 405 - Method Not Allowed + +Status: Method Not Allowed + +###### Schema + +##### 429 - Too Many Requests + +Status: Too Many Requests + +###### Schema + +##### 500 - Internal Server Error + +Status: Internal Server Error + +###### Schema + +## Models + +### cluster.ClusterPayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------ | -------- | :------: | ------- | --------------------------------------------------------------- | ------- | +| description | string | `string` | | | ClusterDescription is the description of cluster to be created | | +| displayName | string | `string` | | | ClusterDisplayName is the display name of cluster to be created | | +| kubeconfig | string | `string` | | | ClusterKubeConfig is the kubeconfig of cluster to be created | | + +### cluster.UploadData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------------- | ------- | -------- | :------: | ------- | ----------- | ------- | +| content | string | `string` | | | | | +| fileName | string | `string` | | | | | +| fileSize | integer | `int64` | | | | | +| sanitizedClusterContent | 
string | `string` | | | | | + +### cluster.ValidatePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------- | ------ | -------- | :------: | ------- | ----------- | ------- | +| kubeConfig | string | `string` | | | | | + +### entity.ResourceGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | ------------- | ------------------- | :------: | ------- | ----------- | ------- | +| annotations | map of string | `map[string]string` | | | | | +| apiVersion | string | `string` | | | | | +| cluster | string | `string` | | | | | +| kind | string | `string` | | | | | +| labels | map of string | `map[string]string` | | | | | +| name | string | `string` | | | | | +| namespace | string | `string` | | | | | + +### insight.ResourceSummary + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| creationTimestamp | string | `string` | | | | | +| resource | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | +| resourceVersion | string | `string` | | | | | +| uid | string | `string` | | | | | + +### insight.ResourceTopology + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | --------------------------------------------- | --------------------- | :------: | ------- | ----------- | ------- | +| children | []string | `[]string` | | | | | +| parents | []string | `[]string` | | | | | +| resourceGroup | [EntityResourceGroup](#entity-resource-group) | `EntityResourceGroup` | | | | | + +### insight.ScoreData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------------------------------------------------------------------- | -------------- | ------------------ | :------: | ------- | ----------------------------------------------------------------------- | ------- | +| issuesTotal | integer | `int64` | | | IssuesTotal is the total count of all issues found during the audit. | | +| This count can be used to understand the overall number of problems | | | | | | | +| that need to be addressed. | | | | | | | +| resourceTotal | integer | `int64` | | | ResourceTotal is the count of unique resources audited during the scan. | | +| score | number | `float64` | | | Score represents the calculated score of the audited manifest based on | | +| the number and severity of issues. It provides a quantitative measure | | | | | | | +| of the security posture of the resources in the manifest. | | | | | | | +| severityStatistic | map of integer | `map[string]int64` | | | SeverityStatistic is a mapping of severity levels to their respective | | +| number of occurrences. It allows for a quick overview of the distribution | | | | | | | +| of issues across different severity categories. 
| | | | | | | + +### insight.Statistics + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------- | ------- | :------: | ------- | ----------- | ------- | +| clusterCount | integer | `int64` | | | | | +| resourceCount | integer | `int64` | | | | | +| resourceGroupRuleCount | integer | `int64` | | | | | + +### resourcegrouprule.ResourceGroupRulePayload + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ----------- | -------- | ---------- | :------: | ------- | ----------- | ------- | +| description | string | `string` | | | | | +| fields | []string | `[]string` | | | | | +| name | string | `string` | | | | | + +### scanner.AuditData + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ------------- | ------------------------------------------- | ---------------------- | :------: | ------- | ----------- | ------- | +| bySeverity | map of integer | `map[string]int64` | | | | | +| issueGroups | [][ScannerIssueGroup](#scanner-issue-group) | `[]*ScannerIssueGroup` | | | | | +| issueTotal | integer | `int64` | | | | | +| resourceTotal | integer | `int64` | | | | | + +### scanner.Issue + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------- | ------- | -------- | :------: | ------- | ------------------------------------------------------------------------------------- | ------- | +| message | string | `string` | | | Message provides a detailed human-readable description of the issue. | | +| scanner | string | `string` | | | Scanner is the name of the scanner that discovered the issue. | | +| severity | integer | `int64` | | | Severity indicates how critical the issue is, using the IssueSeverityLevel constants. | | +| title | string | `string` | | | Title is a brief summary of the issue. | | + +### scanner.IssueGroup + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| -------------- | ----------------------------------------------- | ------------------------ | :------: | ------- | ----------- | ------- | +| issue | [ScannerIssue](#scanner-issue) | `ScannerIssue` | | | | | +| resourceGroups | [][EntityResourceGroup](#entity-resource-group) | `[]*EntityResourceGroup` | | | | | + +### unstructured.Unstructured + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +| ---------------------- | ------------------------- | ------------- | :------: | ------- | -------------------------------------------------------------------------------- | ------- | +| object | [interface{}](#interface) | `interface{}` | | | Object is a JSON compatible map with string, float, int, bool, []interface{}, or | | +| map[string]interface{} | | | | | | | +| children. | | | | | | | diff --git a/karpor_versioned_docs/version-v0.5/5-references/3-search-methods.md b/karpor_versioned_docs/version-v0.5/5-references/3-search-methods.md new file mode 100644 index 00000000..e1e63903 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/5-references/3-search-methods.md @@ -0,0 +1,109 @@ +--- +title: Search Methods +--- +Karpor is an open-source project that offers robust capabilities for searching resources across multiple clusters. This document outlines the two main search methods supported by Karpor: DSL (Domain Specific Language) and SQL (Structured Query Language), and explains how to utilize them for resource searches. 
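+
+A query written in either language can also be submitted programmatically through Karpor's search REST API (`GET /rest-api/v1/search` with the `pattern` parameter set to `sql` or `dsl`). A minimal sketch, assuming the server is reachable at the placeholder `$KARPOR_HOST`:
+
+```bash
+curl -G "$KARPOR_HOST/rest-api/v1/search" \
+  --data-urlencode "pattern=sql" \
+  --data-urlencode "query=select * from resources where kind='Namespace'" \
+  --data-urlencode "page=1" \
+  --data-urlencode "pageSize=10"
+```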
+ +## Keywords + +Karpor facilitates resource searches using two methods: DSL and SQL. Both methodologies leverage the following keywords for resource discovery: + +- cluster +- apiVersion +- kind +- namespace +- name +- creationTimestamp +- deletionTimestamp +- ownerReferences +- resourceVersion +- labels.`key` +- annotations.`key` +- content + +## SQL + +Karpor offers a SQL-like approach for querying Kubernetes resources, enabling users to employ SQL syntax for their searches. Below are examples illustrating the use of SQL syntax for various search scenarios: + +**Query resources of the Namespace kind** + +```sql +select * from resources where kind='Namespace' +``` + +**Query resources where the labels contain the key 'key1' with value 'value1'** + +```sql +select * from resources where labels.key1='value1' +``` + +**Query resources where the annotations contain the key 'key1' with value 'value1'** + +```sql +select * from resources where annotations.key1='value1' +``` + +**Query resources that are not of the Pod kind** + +```sql +select * from resources where kind!='Pod' +``` + +**Query resources of the Pod kind within a specific cluster** + +```sql +select * from resources where cluster='demo' and kind='Pod' +``` + +**Query resources of kind within a specified list** + +```sql +select * from resources where kind in ('pod','service') +``` + +**Query resources of kinds not within a specified list** + +```sql +select * from resources where kind not in ('pod','service') +``` + +**Query resources where the namespace starts with appl (where % represents any number of characters)** + +```sql +select * from resources where namespace like 'appl%' +``` + +**Query resources where the namespace contains banan (where \_ represents any single character)** + +```sql +select * from resources where namespace like 'banan_' +``` + +**Query resources where the namespace does not start with appl** + +```sql +select * from resources where namespace not like 'appl%' +``` + +**Query resources where the namespace does not contain banan** + +```sql +select * from resources where namespace notlike 'banan_' +``` + +**Query resources of kind Deployment and created before January 1, 2024, at 18:00:00** + +```sql +select * from resources where kind='Deployment' and creationTimestamp < '2024-01-01T18:00:00Z' +``` + +**Query resources of kind Service and order by creation timestamp in descending order** + +```sql +select * from resources where kind='Service' order by creationTimestamp desc +``` + +**Query resources whose content contains apple** + +```sql +select * from resources where contains(content, 'apple') +``` diff --git a/karpor_versioned_docs/version-v0.5/5-references/_category_.json b/karpor_versioned_docs/version-v0.5/5-references/_category_.json new file mode 100644 index 00000000..1fd07096 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/5-references/_category_.json @@ -0,0 +1,3 @@ +{ + "label": "References" +} diff --git a/karpor_versioned_docs/version-v0.5/6-roadmap/README.md b/karpor_versioned_docs/version-v0.5/6-roadmap/README.md new file mode 100644 index 00000000..b8cf11a3 --- /dev/null +++ b/karpor_versioned_docs/version-v0.5/6-roadmap/README.md @@ -0,0 +1,18 @@ +--- +title: Roadmap +--- +Karpor is an emerging open-source project, and we are committed to diligently polishing it into a **small and beautiful, vendor-neutral, developer-friendly, community-driven** open-source project! 
🚀 Moving forward, we will focus our efforts in the following areas: + +- Refine Karpor's **usability** to lower the barrier of entry and make it sufficiently "user-friendly." +- Strengthen Karpor's **reliability** to ensure it is dependable in production environments. +- Deepen the **ecosystem integration** with more community tools to ensure openness. +- Explore **AI + Karpor** to create more possibilities. +- Embrace the open-source community; we love the **open-source spirit**. If you're interested in open source, then start here! +- ...... + +Karpor follows the [Release Process and Cadence Guide](../4-developer-guide/2-conventions/1-release-process.md), but actions may not strictly adhere to the roadmap. We may adjust milestones based on feedback from community meetings and [GitHub issues](https://github.com/KusionStack/karpor/issues), expecting all community members to join the discussions. For final decisions, please refer to the [GitHub milestones](https://github.com/KusionStack/karpor/milestones). + +Below is the detailed roadmap, which we will continue to update ⬇️ + +- **2024 Roadmap**: [https://github.com/KusionStack/karpor/issues/273](https://github.com/KusionStack/karpor/issues/273) + diff --git a/karpor_versioned_sidebars/version-v0.4-sidebars.json b/karpor_versioned_sidebars/version-v0.4-sidebars.json new file mode 100644 index 00000000..6b358560 --- /dev/null +++ b/karpor_versioned_sidebars/version-v0.4-sidebars.json @@ -0,0 +1,8 @@ +{ + "karpor": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/karpor_versioned_sidebars/version-v0.5-sidebars.json b/karpor_versioned_sidebars/version-v0.5-sidebars.json new file mode 100644 index 00000000..6b358560 --- /dev/null +++ b/karpor_versioned_sidebars/version-v0.5-sidebars.json @@ -0,0 +1,8 @@ +{ + "karpor": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/karpor_versions.json b/karpor_versions.json new file mode 100644 index 00000000..88251a43 --- /dev/null +++ b/karpor_versions.json @@ -0,0 +1,4 @@ +[ + "v0.5", + "v0.4" +] diff --git a/kuperator_versioned_docs/version-v0.3/concepts/_category_.json b/kuperator_versioned_docs/version-v0.3/concepts/_category_.json new file mode 100644 index 00000000..1d3167d4 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 3 +} diff --git a/kuperator_versioned_docs/version-v0.3/concepts/podopslifecycle.md b/kuperator_versioned_docs/version-v0.3/concepts/podopslifecycle.md new file mode 100644 index 00000000..88068133 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/concepts/podopslifecycle.md @@ -0,0 +1,232 @@ +--- +sidebar_position: 2 +--- + +# PodOpsLifecycle + +## Background + +Kubernetes provides a set of default controllers for workload management, such as StatefulSet, Deployment, and DaemonSet, which are responsible for Pod operations. +Meanwhile, application users may also have some services outside the Kubernetes cluster that are closely related to the Pod Lifecycle, including traffic routing, service discovery, or alert monitoring. +However, they face challenges in participating in the operational lifecycle of a Pod, even if they are connected to Kubernetes by developing a controller that watches the Pods. + +PodOpsLifecycle aims to offer Kubernetes administrators and developers finer-grained control over the entire lifecycle of a Pod. 
+It enables developers to execute necessary actions before, during, and after specific phases of a Pod operation. +For instance, removing the Pod's IP from the traffic route before initiating the Pod operation, performing the actual Pod operations, and adding it back after the Pod operation is completed to achieve a smooth and graceful Pod operation, and prevent any traffic loss. + +## Introduction + +In PodOpsLifecycle, participants are classified into two roles: `operation controllers` and `cooperation controllers`. +- **Operation controllers** are responsible for operating Pods, such as Deployments and StatefulSets from Kubernetes, and CollaSets from Kuperator which intend to scale, update, or recreate Pods. +- **Cooperation controllers** are sensitive with Pod status. They handle resources or configurations around Pods, which may include traffic controller, alert monitoring controller, etc. These controllers typically reconcile Kubernetes resources around Pods with external services, such as sync Pod IPs with the LB provider, or maintaining Pods' metadata with application monitoring system. + +The two types of controllers do not need to be aware of each other. All controllers are organized by PodOpsLifecycle. Additionally, KusionStack Kuperator introduces extra phases around the native Kubernetes Pod Lifecycle: ServiceAvailable, Preparing, and Completing. + +![pod-ops-lifecycle](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png) + +- **Completing**: After a Pod is created or updated and becomes ready, Kuperator marks its PodOpsLifecycle as the `Completing` phase. During this phase, the Pod is in a ready condition, prompting cooperation controllers to perform actions such as registering the Pod IP in the traffic route. Once all cooperation controllers complete their tasks, Kuperator sets the PodOpsLifecycle to the `ServiceAvailable` phase. +- **ServiceAvailable**: This phase indicates that the Pod is in a normal state and ready to serve. If everything goes smoothly, the Pod remains in the `ServiceAvailable` phase until the next operation. +- **Preparing**: When an operation controller needs to operate the Pod, it triggers a new PodOpsLifecycle. The Pod then transitions from the `ServiceAvailable` phase to the `Preparing` phase. During this phase, the Pod is initially marked as Unready by setting ReadinessGate to false. All cooperation controllers then begin preparing tasks, such as removing the Pod's IP from the traffic route. After completing these tasks, the Pod enters the `Operating` phase. +- **Operating**: If a Pod enters the `Operating` phase, it is expected to accept any kind of operation without any damage, including recreation, scaling-in, upgrading, etc. Operation controllers are permitted to apply any changes to this Pod. Once all these operations are completed, the Pod advances to the next phase — `Completing`, and the PodOpsLifecycle continues. + +The PodOpsLifecycle detail and the relationship with Kubernetes native Pod Lifecycle is showed by following sequence diagram. + +![pod-ops-lifecycle-sequence-diagram](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png) + +## Developer's Guide + +This section introduces how to develop operation controllers and cooperation controllers to interact with PodOpsLifecycle. +- The operation controller is responsible for a set of Pod operation tasks. KusionStack Kuperator has already provided various types of operation controllers. 
Users only need to develop a new operation controller if a new kind of Pod operation needs to be added.
+- The cooperation controller participates in PodOpsLifecycle before and after operating on a Pod, such as the traffic controller, the alert monitoring controller, and other controllers responsible for maintaining the Pod and application status. Users should develop a new cooperation controller only when there is a new type of service or status around the Pod that needs to be maintained, such as integrating with a new traffic provider.
+
+### Operation Controller
+
+The operation controller is responsible for Pod operations. The tasks that an operation controller needs to perform during PodOpsLifecycle include triggering a PodOpsLifecycle, checking whether the Pod has entered the Operating phase, performing Pod operations, and marking Pod operations as finished. Helpers for these interactions with PodOpsLifecycle are provided in `kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle/utils.go`.
+
+A simple operation controller reconcile method would look like this:
+
+```go
+import (
+    "context"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+
+    "kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle"
+)
+
+var operationAdapter = &OperationOpsLifecycleAdapter{}
+
+type OperationOpsLifecycleAdapter struct {
+}
+
+// GetID indicates the ID of the PodOpsLifecycle
+func (a *OperationOpsLifecycleAdapter) GetID() string {
+    return "new-id"
+}
+
+// GetType indicates the type of this operation controller
+func (a *OperationOpsLifecycleAdapter) GetType() podopslifecycle.OperationType {
+    return "new-type"
+}
+
+// AllowMultiType indicates whether multiple IDs which have the same Type are allowed
+func (a *OperationOpsLifecycleAdapter) AllowMultiType() bool {
+    return true
+}
+
+// WhenBegin is a hook, which will be executed when a lifecycle begins
+func (a *OperationOpsLifecycleAdapter) WhenBegin(pod client.Object) (bool, error) {
+    return false, nil
+}
+
+// WhenFinish is a hook, which will be executed when a lifecycle finishes
+func (a *OperationOpsLifecycleAdapter) WhenFinish(pod client.Object) (bool, error) {
+    return false, nil
+}
+
+......
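+// The Reconcile method below sketches how an operation controller drives
+// PodOpsLifecycle: it triggers a lifecycle with Begin, waits until AllowOps
+// reports that the Pod has entered the Operating phase, performs the actual
+// operation, and then calls Finish so the lifecycle can move on to Completing.
+// needOperation and doPodOperation are illustrative helpers that a real
+// controller would implement itself.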
+func (r *PodOperationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+    // get the Pod
+    pod := &corev1.Pod{}
+    if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+        if !errors.IsNotFound(err) {
+            return reconcile.Result{}, err
+        }
+        return reconcile.Result{}, nil
+    }
+
+    // check if the Pod needs operation
+    if !r.needOperation(pod) {
+        return reconcile.Result{}, nil
+    }
+
+    // if PodOpsLifecycle has not been triggered, trigger it
+    if !podopslifecycle.IsDuringOps(operationAdapter, pod) {
+        if updated, err := podopslifecycle.Begin(r, operationAdapter, pod); err != nil {
+            return reconcile.Result{}, err
+        } else if updated {
+            return reconcile.Result{}, nil
+        }
+    }
+
+    // wait until the Pod enters the Operating phase
+    if _, allowed := podopslifecycle.AllowOps(operationAdapter, 0, pod); !allowed {
+        return reconcile.Result{}, nil
+    }
+
+    // do the operation work
+    if completed := r.doPodOperation(pod); !completed {
+        return reconcile.Result{}, nil
+    }
+
+    // after the operation work is completed, finish the Operating phase to continue PodOpsLifecycle
+    if _, err := podopslifecycle.Finish(r, operationAdapter, pod); err != nil {
+        return reconcile.Result{}, err
+    }
+
+    return reconcile.Result{}, nil
+}
+```
+
+### Pod Cooperation Controller
+
+There are two ways to develop a cooperation controller.
+One way is to develop a controller using the controller runtime and adhering to some conventions of PodOpsLifecycle and Kubernetes.
+Another way is to make use of the [ResourceConsist](https://github.com/KusionStack/resourceconsist) framework provided by KusionStack, which is described in its [documentation](https://www.kusionstack.io/docs/kuperator/manuals/resourceconsist).
+
+The following outlines the first approach.
+
+```go
+import (
+    "context"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    k8spod "k8s.io/kubernetes/pkg/api/v1/pod"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+    appsv1alpha1 "kusionstack.io/kuperator/apis/apps/v1alpha1"
+)
+
+const (
+    // Finalizer needs to have the prefix `prot.podopslifecycle.kusionstack.io`.
+    // KusionStack Kuperator keeps this prefix backward-compatible,
+    // so that it can be hard-coded to decouple from KusionStack Kuperator.
+    finalizerPrefix = appsv1alpha1.PodOperationProtectionFinalizerPrefix
+
+    protectionFinalizer = finalizerPrefix + "/" + "unique-id"
+)
+
+......
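+// The Reconcile method below sketches the finalizer-based protocol a cooperation
+// controller follows: while the Pod is ready, it does its work (for example,
+// adding the Pod IP to the traffic route) and attaches the protection finalizer;
+// once PodOpsLifecycle marks the Pod as unready during Preparing, it reverts the
+// work and removes the finalizer so the Pod operation can proceed.
+// trafficOn and trafficOff stand in for the real resource reconciliation logic.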
+func (r *PodResourceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+    // get the Pod
+    pod := &corev1.Pod{}
+    if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+        if !errors.IsNotFound(err) {
+            return reconcile.Result{}, err
+        }
+        return reconcile.Result{}, nil
+    }
+
+    if k8spod.IsPodReady(pod) {
+        // do resource reconcile like adding the Pod IP to the traffic route
+        r.trafficOn(pod.Status.PodIP)
+        // It is important to add a unique finalizer on this Pod
+        return reconcile.Result{}, r.addFinalizer(ctx, pod, protectionFinalizer)
+    }
+
+    // do resource reconcile like removing the Pod IP from the traffic route
+    r.trafficOff(pod.Status.PodIP)
+    // It is important to remove the unique finalizer from this Pod
+    return reconcile.Result{}, r.removeFinalizer(ctx, pod, protectionFinalizer)
+}
+
+func (r *PodResourceReconciler) addFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+    if controllerutil.ContainsFinalizer(pod, finalizer) {
+        return nil
+    }
+
+    controllerutil.AddFinalizer(pod, finalizer)
+    return r.Update(ctx, pod)
+}
+
+func (r *PodResourceReconciler) removeFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+    if !controllerutil.ContainsFinalizer(pod, finalizer) {
+        return nil
+    }
+
+    controllerutil.RemoveFinalizer(pod, finalizer)
+    return r.Update(ctx, pod)
+}
+```
+
+## Key Features
+
+### Concurrency Support
+
+PodOpsLifecycle in KusionStack Kuperator supports concurrency.
+This means PodOpsLifecycle is able to organize and track multiple controllers operating on the same pod at the same time.
+For example, when one controller is going to update a Pod, other controllers are allowed to perform other operations on it at the same time, such as deleting, restarting, or recreating it,
+although the result may not be meaningful.
+
+### General Workload Support
+
+PodOpsLifecycle offers seamless integration with various workload types, including Deployment and StatefulSet.
+To enable this functionality, ensure the feature gate for `GraceDeleteWebhook` is enabled when starting the KusionStack Kuperator controller:
+
+```shell
+# Enable the GraceDeleteWebhook feature when starting the controller with this argument
+$ /manager --feature-gates=GraceDeleteWebhook=true
+```
+
+Once enabled, any Pod labeled with `kusionstack.io/control=true` under a general workload, such as Deployment, becomes manageable by PodOpsLifecycle.
+This feature gives such workloads a way to work more closely with Pod cooperation controllers.
+
+> Due to the Kubernetes webhook mechanism, the following error will be returned when workloads or users delete a pod. This error is intentional and serves to indicate that the pod deletion process has started and is being managed by PodOpsLifecycle.
+> ```shell +> $ kubectl -n default delete pod collaset-sample-74fsv +> Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle +> ``` \ No newline at end of file diff --git a/docs/user_docs/intro/_category_.json b/kuperator_versioned_docs/version-v0.3/introduction/_category_.json similarity index 100% rename from docs/user_docs/intro/_category_.json rename to kuperator_versioned_docs/version-v0.3/introduction/_category_.json diff --git a/kuperator_versioned_docs/version-v0.3/introduction/introduction.md b/kuperator_versioned_docs/version-v0.3/introduction/introduction.md new file mode 100644 index 00000000..5adf228c --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/introduction/introduction.md @@ -0,0 +1,49 @@ +# What is KusionStack Kuperator? + +KusionStack Kuperator consists of workloads and operators built on Kubernetes Custom Resource Definitions, +with a primary aim of bridging the gap between platform development and Kubernetes. + +By keeping more operation works finished in Kubernetes layer, +KusionStack Kuperator reduces complexity when interacting with Kubernetes +and enhances convenience for platform developers. + +## Key features + +KusionStack Kuperator currently provides the following features, +streamlining application operations when developing platforms based on Kubernetes: + +### Fine-grained operation + +KusionStack Kuperator introduces PodOpsLifecycle to extend native Pod lifecycle with additional phases such as PreCheck, Preparing, etc. +All operators within KusionStack Kuperator will respect PodOpsLifecycle, +so that PodOpsLifecycle is able to orchestrate all of these operators to operate each Pod coordinately. + +### Advanced workloads + +KusionStack Kuperator offers several workloads to ensure it is convenient and effective to delivery and operate application resources. + +Recently, Kuperator provides the workload CollaSet. +Besides the basic ability of scaling and updating Pods like Deployment and StatefulSet of Kubernetes, +CollaSet also provides a range of scale and update strategies, +like in-place update with container image and pod revision consistency. + +### Streamlined Pod Operation + +KusionStack Kuperator introduces resource consist framework that offers a graceful way +to integrate resource management around Pods, including traffic control, into the PodOpsLifecycle. +This simplifies the works for platform developers dealing with Pod operation details. +KusionStack also integrates some resources by default, such as Aliyun SLB. + +### Risk management + +Building upon the PodOpsLifecycle, KusionStack Kuperator introduces the workload named PodTransitionRule +which will keep risks of pod operation under control. +By providing a MaxUnavailable rule similar to Kubernetes' PodDisruptionBudget (PDB), +it ensures there are always enough Pods available for service. +Furthermore, it allows for custom rules through extension via webhooks and label hooks. + +## Future works + +KusionStack Kuperator project is currently in its early stages. +Our goal is to simplify platform development. We will continue building in areas such as application operations, +observability, and insight. We hope the Kuperator will make it easier for you to build platforms. 
\ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.3/manuals/_category_.json b/kuperator_versioned_docs/version-v0.3/manuals/_category_.json new file mode 100644 index 00000000..795f138a --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/manuals/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Manuals", + "position": 4 +} diff --git a/kuperator_versioned_docs/version-v0.3/manuals/collaset.md b/kuperator_versioned_docs/version-v0.3/manuals/collaset.md new file mode 100644 index 00000000..b5ebcfaf --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/manuals/collaset.md @@ -0,0 +1,942 @@ +--- +sidebar_position: 1 +--- + +# CollaSet +CollaSet is responsible for managing a set of Pods. Similar to Kubernetes Deployment and StatefulSet, it also supports scaling and updating Pods. Additionally, CollaSet offers advanced features to provide users with more granular control over managing Pods. + +A basic CollaSet configuration is represented in the following YAML format: + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` +Let's explore the features of CollaSet. + +## Basic Features +### Scaling Pods +CollaSet utilizes the field spec.replicas to indicate the number of Pods under management. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 3 # indicate the number of Pods to manage + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +... +``` +Pods can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-85q7g 1/1 Running 0 57s +collaset-sample-vx5ws 1/1 Running 0 57s +collaset-sample-hr7pv 1/1 Running 0 57s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +By default, CollaSet always creates new Pods using the latest template specified in `spec.template`. CollaSet establishes ownership over a set of Pods through the label selector defined in `spec.selector`. Thus, it's important to ensure that the labels provided in `spec.selector` match those in `spec.template.metadata.labels`. + +CollaSet status provides general information about this CollaSet and all Pods under it. + +``` shell +$ kubectl -n default get cls collaset-sample -o yaml +...... +status: + availableReplicas: 3 + collisionCount: 0 + conditions: + - lastTransitionTime: "2023-09-01T03:56:09Z" + reason: Updated + status: "True" + type: Update + currentRevision: collaset-sample-6d7b7c58f + observedGeneration: 1 + operatingReplicas: 0 + readyReplicas: 3 + replicas: 3 + scheduledReplicas: 3 + updatedAvailableReplicas: 3 + updatedReadyReplicas: 3 + updatedReplicas: 3 + updatedRevision: collaset-sample-6d7b7c58f +``` + +Some fields in CollaSet status are explained here: + +`updatedRevision` indicates the latest revision that CollaSet uses to create or update Pods. + +`currentRevision` indicates the last updated revision. 
It will be set to updatedRevision after all Pods are updated, and their PodReady conditions become True. + +`replicas` indicates the count of Pods under this CollaSet. + +`scheduledReplicas` indicates the count of Pods under this CollaSet that successfully got scheduled. + +`availableReplicas` indicates the count of Pods under this CollaSet that have all expected finalizers attached. + +`updatedReplicas` indicates the count of Pods under this CollaSet that have the updated revision. + +`updatedReadyReplicas` indicates the count of Pods under this CollaSet that are counted in `updatedReplicas` and have their PodReady conditions set to True. + +`updatedAvailableReplicas` indicates the count of Pods under this CollaSet that is counted in `updatedReadyReplicas` and have all expected finalizers attached. + +### Updating Pods +CollaSet generates Pods according to the pod template described in `spec.template`. This template can be updated to signal CollaSet to update each owned Pod: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: +...... +spec: +...... + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 +...... +``` + +CollaSet immediately updates all Pods it owns with the new Pod template by default. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +The update progress can be controlled using partition. + +#### Partition +Similar to StatefulSet, `partition` is used to control the upgrade progress. + +By default, if not indicated, all Pods will be updated when spec.template changes. The `partition` can be adjusted from 0 to `spec.replicas` to specify how many Pods CollaSet should update. **Unlike StatefulSet, the partition in CollaSet represents the number of Pods to update.** + +Let's update the image back to nginx:1.25.2: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.25.2 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # use partition to control upgrade progress +``` + +In this case, CollaSet only updates 1 Pod to the updated revision. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.25.2 # only 1 Pod updated + - image: nginx:1.24.0 +``` + +#### Update by Label +By configuring the `byLabel` rolling update policy, users can precisely specify which Pods they want to update by using labels. + +If you go back to the sample in the [section Partition](#Partition) and change `byPartition` to `byLabel` like the following: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + rollingUpdate: +- byPartition: +- partition: 1 ++ byLabel: {} +``` + +Subsequently, each Pod will only be updated if it's marked with the label `collaset.kusionstack.io/update-included`. + +## Advanced Features +### Pod Instance ID +Each Pod created by CollaSet has a unique ID held by the label `collaset.kusionstack.io/instance-id`, which can be used to identify each individual Pod. 
+ +``` yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + collaset.kusionstack.io/instance-id: "0" # Pod instance ID +... +``` + +CollaSet provides a context to specify an ID pool, which defaults to the same name as the CollaSet and is immutable. + +``` yaml +... +spec: + scaleStrategy: + context: +``` + +The same ID pool name can be indicated for multiple CollaSets, allowing them to share a single ID pool. Consequently, each Pod created by these CollaSets will be assigned a unique ID. + +For example, these are two CollaSets with the same context: + +``` shell +$ cat ~/sample.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-a +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +--- + +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-b +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` + +Then create these CollaSets with the sample file: + +``` shell +$ kubectl -n default apply -f ~/sample.yaml +collaset.apps.kusionstack.io/collaset-sample-a created +collaset.apps.kusionstack.io/collaset-sample-b created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-a-g4sjj 1/1 Running 0 42s +collaset-sample-a-ph9vc 1/1 Running 0 42s +collaset-sample-b-fqkq4 1/1 Running 0 42s +collaset-sample-b-lqg8f 1/1 Running 0 42s + +$ kubectl -n default get pod -o yaml | grep collaset.kusionstack.io/instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "3" + collaset.kusionstack.io/instance-id: "2" +``` + +Now, the 4 Pods created by these 2 CollaSets will have a unique instance ID. + +### Revision Consistency +Pods within a CollaSet can utilize more than two different Pod templates simultaneously, including both the current and updated revisions. This can result from partial updates. To ensure the stability of Pod revisions over time, CollaSet records this information. When a Pod is deleted, CollaSet recreates it using its previous revision. + +It can be reproduced by following steps: + +1. Provision a new CollaSet with replicas 3. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 4s +collaset-sample-glgnb 1/1 Running 0 4s +collaset-sample-qs46r 1/1 Running 0 4s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +2. Update the image of PodTemplate of the CollaSet to image nginx:1.24.0 and set the partition to 2. Then there will be 2 Pods with image nginx:1.24.0 and 1 Pod with image nginx:1.25.2. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... 
+ updateStrategy: + rollingUpdate: + byPartition: + partition: 2 # update 2 Pods + +# Wait until these 2 Pods are updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +3. Update the image of PodTemplate of the CollaSet to image nginx:1.23.4 and set the partition to 1. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.23.4 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # update 1 Pod + +# Wait until the Pod is updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # Pod collaset-sample-qs46r + - image: nginx:1.23.4 +``` + +Now, there are 3 Pods, each of which has an individual image. If we then delete the Pod with the image nginx:1.24.0, the new Pod replacing it will be created with the same image nginx:1.24.0 in order for the Pod to inherit the revision. + +``` shell +$ kubectl delete -n default delete pod collaset-sample-qs46r +pod "collaset-sample-qs46r" deleted + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 3h +collaset-sample-ht9x6 1/1 Running 0 2m27s # Pod recreated +collaset-sample-qs46r 1/1 Running 1 (3h ago) 3h + +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # image has not been changed + - image: nginx:1.23.4 +``` + +### In-Place Update Pod +In addition to the `Recreate` update policy, which is identical to Deployment and StatefulSet, CollaSet offers the `InPlaceIfPossible` update policy. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # Options: InPlaceIfPossible, Recreate, Replace +``` + +`InPlaceIfPossible` is the default value, which instructs CollaSets to try to update Pods in place when only container images, labels, and annotations have changed. If some other fields have changed too, the policy will back off to the `Recreate` policy. + +`Recreate` indicates CollaSets always delete the old Pod and create a new one with an updated revision. + +If update pod template with `InPlaceIfPossible` policy as following example, the Pod will not be recreated. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # use InPlaceIfPossible policy + +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5wvlh 1/1 Running 1 (6s ago) 2m10s +collaset-sample-ldvrg 1/1 Running 1 (6s ago) 2m10s +collaset-sample-pbz75 1/1 Running 1 (6s ago) 2m10s +``` + +### Replace Update Pod + +CollaSet provides the `Replace` policy for certain applications that are sensitive to the available number of Pods. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... 
+ updateStrategy: + podUpgradePolicy: Replace # Options: InPlaceIfPossible, Recreate, Replace +``` + +The `Replace` policy indicates that CollaSet should update a Pod by creating a new one to replace it. +Unlike the `Recreate` policy, which deletes the old Pod before creating a new updated one, or the `InPlaceIfPossible` policy, which updates the current Pod in place, +the `Replace` policy first creates a new Pod with the updated revision. It then deletes the old Pod once the new one becomes available for service. + +```shell +# Before updating CollaSet +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-dwkls 1/1 Running 0 6m55s + +# After updating CollaSet, the updated Pod is created first +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-dwkls 1/1 Running 0 6m55s +collaset-sample-rcmbv 0/1 ContainerCreating 0 0s + +# Once the created Pod is available for service, the old Pod will be deleted +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-rcmbv 1/1 Running 0 1s +collaset-sample-dwkls 1/1 Terminating 0 7m12s +``` + +The two Pods will have a pair of labels to identify their relationship. The new Pod will have the label `collaset.kusionstack.io/replace-pair-origin-name` to indicate the name of the old Pod, and the old Pod will have the label `collaset.kusionstack.io/replace-pair-new-id` to indicate the instance ID of the new Pod. + +Additionally, the new Pod and old Pod will each begin their own PodOpsLifecycles, which are independent of each other. + +### Recreate And Replace Specified Pod + +In practice, users often need to recreate or replace specified Pods under a CollaSet. + +To delete a Pod, users can simply call the Kubernetes API, like executing `kubectl delete pod `. +However, this will bypass the [PodOpsLifecycle](https://www.kusionstack.io/docs/kuperator/concepts/podopslifecycle) Mechanism. +We provide following two options: + +1. Enable the feature `GraceDeleteWebhook` so that it is possible to delete Pods through `PodOpsLifecycle`. +```shell +# Enable the GraceDeleteWebhook feature when starting the controller with this argument +$ /manager --feature-gates=GraceDeleteWebhook=true +``` +```shell +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-vqccr 1/1 Running 0 21s + +# Delete the pod directly. A message will respond indicating that the Pod deletion is handled by PodOpsLifecycle +kubectl -n default delete pod collaset-sample-vqccr +Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle + +# The old Pod is deleted, and a new Pod will be created +$ kubectl -n default get pod -w +collaset-sample-vqccr 1/1 Running 0 71s +collaset-sample-vqccr 1/1 Terminating 0 71s +...... +collaset-sample-nbl6t 0/1 Pending 0 0s +collaset-sample-nbl6t 0/1 ContainerCreating 0 0s +...... +collaset-sample-nbl6t 1/1 Running 0 0s +``` +2. Label the Pod with `podopslifecycle.kusionstack.io/to-delete`, so that CollaSet will delete the Pod through PodOpsLifecycle. 
+ +```shell +# Label Pod +$ kubectl -n default label pod collaset-sample-nbl6t podopslifecycle.kusionstack.io/to-delete=true + +# The old Pod is deleted, and a new Pod will be recreated +$ kubectl -n default get pod -w +collaset-sample-nbl6t 1/1 Running 0 5m28s +collaset-sample-nbl6t 1/1 Terminating 0 5m28s +...... +collaset-sample-w6x69 0/1 Pending 0 0s +...... +collaset-sample-w6x69 0/1 ContainerCreating 0 0s +...... +collaset-sample-w6x69 1/1 Running 0 2s +``` + +Recreating a Pod will delete the old Pod first and then create a new one. This will affect the available Pod count. +To avoid this, CollaSet provides a feature to replace Pods by labeling them with `podopslifecycle.kusionstack.io/to-replace`. + +```shell +# Replace Pod by label +$ kubectl -n echo label pod collaset-sample-w6x69 podopslifecycle.kusionstack.io/to-replace=true + +# The old Pod is deleted, and a new Pod will be created +$ kubectl -n default get pod -w +collaset-sample-w6x69 1/1 Running 0 5m29s +collaset-sample-74fsv 0/1 Pending 0 0s +collaset-sample-74fsv 0/1 ContainerCreating 0 0s +...... +collaset-sample-74fsv 1/1 Running 0 2s +...... +collaset-sample-w6x69 0/1 Terminating 0 5m33s +``` + + +### Supprting PVCs +CollaSet introduces support for PVCs, allowing user to declare `VolumeClaimTemplates` to create PVCs for each Pod. +Furthermore, in response to common issues with PVCs management, such as high modification costs and difficult control, CollaSet extends its functionality with the following advantages vs. StatefulSet: + +1. Support update, add and delete on `volumeClaimTemplates`. +2. Provide control over PVC lifecycle. + +#### Provision PVCs +The `collaset-pvc.yaml` file declares a CollaSet with `VolumeClaimTemplates` to provision a PVC with `1Gi` storage for each Pod. +These PVCs are then mounted on the container at the path `/path/mount/www`. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi +``` + +Pods and PVCs can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-pw5lg 1/1 Running 0 4s +foo-5n6ts 1/1 Running 0 4s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-h5zv7 Bound pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6 1Gi RWO standard 7s +foo-www-lswp2 Bound pvc-9564b44b-9c99-467b-abee-4285183ff9c3 1Gi RWO standard 7s +``` + +Each Pod and its related PVC have the same value of label `collaset.kusionstack.io/instance-id`. + +``` shell +$ kubectl -n default get pod -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "0" + +$ kubectl -n default get pvc -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "0" +``` + +#### Update PVCs +To save the operating costs of PVCs, i.e. expand storage capacity, CollaSet supports update, add and delete on `volumeClaimTemplates`. 
+
+To achieve this, for each PVC, CollaSet calculates a hash value based on its template and attaches it to the label `collaset.kusionstack.io/pvc-template-hash`.
+Once users modify the templates, CollaSet recognizes the change, calculates a new hash value, and attaches it to the new PVCs that replace the old ones.
+
+Let's give it a try: update the storage of the PVC template from `1Gi` to `2Gi`.
+``` shell
+$ kubectl -n default edit cls foo
+  ......
+  volumeClaimTemplates:
+  - metadata:
+      name: www
+    spec:
+      storageClassName: standard
+      volumeMode: Filesystem
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+-         storage: 1Gi
++         storage: 2Gi # update pvc template to expand storage
+......
+```
+
+There are 2 new PVCs with `2Gi` storage created with different hash values.
+
+``` shell
+$ kubectl -n default edit cls foo
+collaset.apps.kusionstack.io/foo edited
+
+$ kubectl -n default get pod
+NAME        READY   STATUS        RESTARTS   AGE
+foo-pw5lg   1/1     Terminating   0          7s
+foo-5n6ts   1/1     Terminating   0          7s
+foo-9nhz4   0/1     Pending       0          1s
+foo-xb2gd   0/1     Pending       0          1s
+
+$ kubectl -n default get pvc
+NAME            STATUS        VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+foo-www-h5zv7   Terminating   pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6   1Gi        RWO            standard       11s
+foo-www-lswp2   Terminating   pvc-9564b44b-9c99-467b-abee-4285183ff9c3   1Gi        RWO            standard       11s
+foo-www-cj2s9   Bound         pvc-647e2a81-7fc6-4f37-a835-e63da9172de3   2Gi        RWO            standard       5s
+foo-www-hp2t6   Bound         pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9   2Gi        RWO            standard       5s
+
+$ kubectl -n default get pvc -o yaml | grep pvc-template-hash
+      collaset.kusionstack.io/pvc-template-hash: 594d8857f9 # hash value of old pvc
+      collaset.kusionstack.io/pvc-template-hash: 594d8857f9
+      collaset.kusionstack.io/pvc-template-hash: d78c5ff6b # hash value of new pvc
+      collaset.kusionstack.io/pvc-template-hash: d78c5ff6b
+```
+
+Users can retain the old PVCs by configuring the `whenScaled` policy to `Retain`.
+The old PVCs can then be re-mounted on their related Pods after rolling back.
+Otherwise, the old PVCs are deleted by the default policy `Delete`.
+
+
+#### Add PVCs
+Add a PVC template `yyy`, which is mounted on the container at the path `/path/mount/yyy`.
+
+``` shell
+$ kubectl -n default edit cls foo
+......
+  spec:
+    containers:
+    - image: nginx:1.25
+      name: nginx
+      volumeMounts:
+      - mountPath: /path/mount/www # path to mount PVC
+        name: www
++     - mountPath: /path/mount/yyy # path to mount PVC
++       name: yyy
+  volumeClaimTemplates:
+  - metadata:
+      name: www
+    spec:
+      storageClassName: standard
+      volumeMode: Filesystem
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+          storage: 2Gi
++ - metadata: # added pvc template
++     name: yyy
++   spec:
++     storageClassName: standard
++     volumeMode: Filesystem
++     accessModes: [ "ReadWriteOnce" ]
++     resources:
++       requests:
++         storage: 2Gi
+```
+
+Now, each pod has two PVCs: a new PVC claimed by template `yyy` and an old PVC claimed by template `www`.
+ +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-8wwsz 0/1 Pending 0 1s +foo-9nhz4 1/1 Terminating 0 23s +foo-hd2cv 0/1 Pending 0 1s +foo-xb2gd 1/1 Terminating 0 23s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 25s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 25s +foo-yyy-c68nh Bound pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 3s # new pvc +foo-yyy-vpwss Bound pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 3s # new pvc +``` + +#### Delete PVCs +Delete the PVC template `yyy` on CollaSet. + +``` shell +$ kubectl -n default edit cls foo +...... + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www +- - mountPath: /path/mount/yyy # path to mount PVC +- name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi +- - metadata: # delete pvc template +- name: yyy +- spec: +- storageClassName: standard +- volumeMode: Filesystem +- accessModes: [ "ReadWriteOnce" ] +- resources: +- requests: +- storage: 2Gi +``` + +Now, PVCs claimed by template `yyy` are deleted and the origin PVCs claimed by template `www` are retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-6qcpc 1/1 Running 0 2s +foo-z2jqv 1/1 Running 0 2s +foo-8wwsz 1/1 Terminating 0 38s +foo-hd2cv 1/1 Terminating 0 38s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 61s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 61s +foo-yyy-c68nh Terminating pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 39s +foo-yyy-vpwss Terminating pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 39s +``` + +#### PVC Retention Policy +CollaSet provides control over PVC lifecycle by configuring `spec.persistentVolumeClaimRetentionPolicy`. +Users can retain or delete PVCs after its related Pod is scaled down or CollaSet is deleted, respectively. +This feature is also supported by [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) since v1.27. +Basic rule is detailed as follows: +- `whenScale` : decides to delete or retain PVCs after Pod is scaled down. +- `whenDeleted`: decides to delete or retain PVCs after CollaSet is deleted. + +For each policy users can set the value to either Delete (by default) or Retain. +Note that for StatefulSet, the default policy is Retain. + +#### whenScaled +Apply `collaset-pvc.yaml` and edit foo to scale replicas to 1. +``` shell +$ kubectl apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl edit cls foo + ...... + spec: +- replicas: 2 ++ replicas: 1 # scale in 1 pod + selector: + matchLabels: + app: foo + ...... +``` +As the `whenScaled` is not configured, thus its value is `Delete` by default. +Consequently, PVC `foo-www-wzwbq` is deleted as its related Pod `foo-tkc5m` is scaling down. 
+ +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-tkc5m 0/1 Terminating 0 27s # related pvc is terminating +foo-vwtcm 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-wzwbq Terminating pvc-b92c28c6-59ad-4976-810c-8d538c4a22c6 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 29s +``` + +Set `Retain` to `whenScaled`, and scale replicas to 0. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 1 ++ replicas: 0 # scale in 1 pod + selector: + matchLabels: + app: foo ++ scaleStrategy: ++ persistentVolumeClaimRetentionPolicy: ++ whenScaled: Retain # retain the pvc after pod is scaled down + ...... +``` + +Pod `foo-vwtcm` is terminating, while its related PVC `foo-www-r4vlh` is retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-vwtcm -n default 1/1 Terminating 0 62s # related pvc is retained + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 63s +``` + +To validate the retention policy, try ro scale replicas to 2, and the remaining PVC should be mounted again. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 0 ++ replicas: 2 # scale out 2 pods + ...... +``` + +We can see that PVC `foo-www-r4vlh` is retained by Pod `foo-px487` as they have the same `instance-id`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-ld5xc 1/1 Running 0 27s +foo-px487 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 2m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` + +#### whenDelete +Edit `foo` to configure `Retain` policy for `whenDelete`, and then delete this CollaSet. +``` shell +$ kubectl -n default edit cls foo + ...... + scaleStrategy: + persistentVolumeClaimRetentionPolicy: + whenScaled: Retain ++ whenDelete: Retain # retain the pvc after collaset is deleted + ...... +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default delete cls foo +collaset.apps.kusionstack.io "foo" deleted +``` + +Now, try to recreate `foo` with 2 replicas, and the result shows both PVCs are retained. 
+``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-qhh8t 1/1 Running 0 2s +foo-ss255 1/1 Running 0 2s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 4m29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 6m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" # pvc foo-www-d48gx is retained + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` diff --git a/kuperator_versioned_docs/version-v0.3/manuals/poddecoration.md b/kuperator_versioned_docs/version-v0.3/manuals/poddecoration.md new file mode 100644 index 00000000..a4c87532 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/manuals/poddecoration.md @@ -0,0 +1,352 @@ +--- +sidebar_position: 4 +--- + +# PodDecoration +PodDecoration works in conjunction with CollaSet to selectively inject specific configurations to Pods that meet certain criteria. + +PodDecoration not only allows injecting sidecar containers to Pods but also enables modifying existing container configurations, metadata, and scheduling parameters etc. +The PodDecoration controller does not control the upgrade of Pods. The actual upgrade process is fully controlled by the CollaSet controller. This means that the injection upgrade of PodDecoration can also be performed `InPlaceIfPossible`. + +About [CollaSet](collaset.md). +# Example + +## Create CollaSet + +```yaml +# collaset.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: foo +``` +Use `collaset.yaml` to create three pods under CollaSet `foo` management. +```shell +$ kubectl apply -f collaset.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl get cls +NAME DESIRED CURRENT AVAILABLE UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +foo 3 3 3 3 3 3 foo-7bdb974bc7 foo-7bdb974bc7 7s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2wnnf 1/1 Running 0 41s +foo-hqpx7 1/1 Running 0 41s +foo-mqt48 1/1 Running 0 41s +``` +## Create PodDecoration + +The following `poddecoration.yaml` file describes a PodDecoration, which selects the pod under CollaSet `foo` and injects the content in `template` into the pod with `instance-id=0`. 
+ +```yaml +# poddecoration.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: sample-pd +spec: + selector: # selected pod range in which PodDecoration takes effect + matchLabels: + app: foo + updateStrategy: + rollingUpdate: + selector: # select pod to upgrade in effect range + matchLabels: + collaset.kusionstack.io/instance-id: "0" + template: + metadata: + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v1" + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + command: ["sleep", "2h"] + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` + +Create PodDecoration `sample-pd` to upgrade selected pod +```shell +$ kubectl apply -f poddecoration.yaml +poddecoration.apps.kusionstack.io/sample-pd created +``` +The status of PodDecoration is updated, and one pod is injected with sidecar through recreate. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 1 1 1 sample-pd-9465f4c84 20s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 15s +foo-2wnnf 1/1 Running 0 2m +foo-hqpx7 1/1 Running 0 2m + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-2wnnf + escaped: true + - name: foo-hqpx7 + escaped: true + matchedPods: 3 + injectedPods: 1 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + updatedRevision: sample-pd-9465f4c84 +``` + +## Update PodDecoration + +### Rolling update v1 + +Edit `sample-pd` to expand the upgrade scope. +```shell +$ kubectl edit pd sample-pd +``` + +```yaml +# poddecoration.yaml +# Edit updateStrategy to select instance-id in [0, 1, 2] +... +spec: + ... + updateStrategy: + rollingUpdate: + selector: + matchExpressions: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + - "1" + - "2" + template: + ... +``` + +All pods updated. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 3 3 sample-pd-9465f4c84 sample-pd-9465f4c84 3m + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 3m +foo-lftw8 2/2 Running 0 8s +foo-n57rr 2/2 Running 0 8s + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + currentRevision: sample-pd-9465f4c84 + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 3 + updatedReadyPods: 3 + updatedAvailablePods: 3 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-9465f4c84 +``` +### Rolling update v1 -> v2 + + +Update `sample-pd`'s sidecar container image and `updateStrategy`. +```shell +$ kubectl edit pd sample-pd +``` +```yaml +# poddecoration.yaml +# Update sidecar-a's image with ubuntu:22.10 +# Edit updateStrategy to select instance-id in [0] +... +spec: + ... + updateStrategy: + rollingUpdate: + selector: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + template: + ... + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.10 + ... 
+``` +Pod `foo-2gnnl` in-place upgrade sidecar container image. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 1 1 sample-pd-9465f4c84 sample-pd-8697d4bf8c 6min + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 1 (12s ago) 6m +foo-lftw8 2/2 Running 0 3min +foo-n57rr 2/2 Running 0 3min + +$ kubectl get pod foo-2gnnl -o yaml | grep "image: ubuntu" + image: ubuntu:22.10 + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-8697d4bf8c + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-8697d4bf8c +``` + + +# Features + +## Injection + +### Metadata +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +spec: + template: + metadata: + - patchPolicy: MergePatchJson + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"sample-pd","version":"v2"}]' + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-name: sample-pd +``` +`patchPolicy` is the injected policy, as follows: +- `Retain`: The original value of annotations and labels will be retained. +- `Overwrite`: The value of annotations and labels corresponding to the existing key will be overwritten. +- `MergePatchJson`: It only takes effect for annotation. If the key does not exist, the value will be written directly. Otherwise, the json value will be merged. + +For example: +```yaml +# Old pod metadata +metadata: + labels: + custom.io/sidecar-version: "v1" + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}]' + +# After metadata injected +metadata: + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-type: sample-pd + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}, {"name":"sample-pd","version":"v2"}]' +``` +### Primary Container + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +spec: + template: + primaryContainers: + - targetPolicy: ByName + name: foo + image: foo:v2 + env: + - name: APP_NAME + value: foo + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` +Injection into the primary containers only supports limited fields: `image`, `env` and `volumeMounts`. + +`targetPolicy` indicates which existed container these configuration should inject into, as follows: +- `ByName`: Only inject containers matching `name`. +- `All`: Inject all primary containers. +- `First`: Inject into first primary container. +- `Last`: Inject into last primary container. + +### Sidecar Container + +```yaml +spec: + template: + containers: + - injectPolicy: AfterPrimaryContainer # Container injected policy, AfterPrimaryContainer or BeforePrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + ... +``` +Inject a new sidecar container. Optional, it can be placed in front or behind the primary container. +### InitContainer + +```yaml +spec: + template: + initContainers: + - name: init + image: custom-init-image:v1 + ... +``` + +## Upgrade strategy +Coming soon... 
\ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.3/manuals/podtransitionrule.md b/kuperator_versioned_docs/version-v0.3/manuals/podtransitionrule.md new file mode 100644 index 00000000..de0f7d2f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/manuals/podtransitionrule.md @@ -0,0 +1,220 @@ +--- +sidebar_position: 3 +--- + +# PodTransitionRule +In normal pod lifecycle, some phases are defined. For example, K8s Pods follow a defined lifecycle,starting in the `Pending` phase, moving through `Running` if at least one of its primary containers starts `OK`, and then through either the `Succeeded` or `Failed` phases depending on whether any container in the Pod terminated in failure. + +These phase definitions can fulfill basic Pod change scenarios, but it are ambiguous. +Actually, before pod upgrade or ready, it is necessary to have some check mechanisms in place to ensure the safety of pod changes. Fortunately, [PodOpsLifecycle](../concepts/podopslifecycle.md) extends and supports some check stages: `PreCheck` before pod upgrade and `PostCheck` before pod ready. + +To ensure a more fine-grained and controlled change process for Pods, we introduce custom rules or perform additional tasks as prerequisites for state transitions before the desired state of a Pod is achieved. Similar to the Pod `readinessGates`, where certain conditions must be met for a Pod to be considered readiness. For example, we consider a Pod ready for the `PostCheck` phase only if it has specific labels. For this purpose, we introduce the `PodTransitionRule` as a prerequisite for the state transition of a Pod. + +## Rule Definition + +You can use `PodTransitionRule` to define a set of transition rules for your workload pods. +Each rule will be executed at the corresponding stage, and it will be blocked if the conditions are not met. + +Here is an example: +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + name: podtransitionrule-sample +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + - stage: PreCheck # stages are supported by PodOpsLifecycle. Defaults to PreCheck. + labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + name: labelCheck + - stage: PostCheck + webhook: + clientConfig: + url: https://1.1.1.1:8089/post-stop + caBundle: Cg== + poll: + url: http://1.1.1.1:8089/fetch-result + rawQueryKey: task-id # URL parameter key to carry trace ID when fetching result. Defaults to task-id in form 'QueryUrl=URL?rawQueryKey=' + intervalSeconds: 5 + timeoutSeconds: 60 + failurePolicy: Fail + parameters: + - key: podIP + valueFrom: + fieldRef: + fieldPath: status.podIP + name: webhookCheck + selector: # select pods in effect + matchLabels: + app: foo +``` + + +### Available Policy +An `availablePolicy` rule defines the availability strategy during the Pod update process. + +#### maxUnavailable +```yaml +availablePolicy: + maxUnavailable: + value: 50% # int or string +``` + +`maxUnavailableValue` is the maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). +Absolute number is calculated from percentage by rounding down. +This can not be 0. + +#### minAvailable +```yaml +availablePolicy: + minAvailable: + value: 5 # int or string +``` +`minAvailableValue` is the minimum number of pods that should be available during the update. + +### Label Check + +A `labelCheck` rule is used to check if labels are satisfied. 
+You can define your own labels as change check conditions and modify the labels according to your needs.
+```yaml
+labelCheck:
+  requires:
+    matchLabels:
+      app.custom/ready: 'true'
+    matchExpressions:
+    - key: app.custom/forbidden
+      operator: DoesNotExist
+```
+
+### Webhook
+A `webhook` is an HTTP callback, based on which an external web application can determine whether a pod can pass this check.
+
+* An HTTP POST occurs first when pods enter the configured stage, which defaults to PreCheck.
+* If `poll` is provided, this rule then keeps calling the polling URL to fetch the result of a long-running job. This job can be located by the `task-id` returned in the response to the first request.
+
+
+```yaml
+webhook:
+  clientConfig: # custom server config
+    url: https://1.1.1.1:8089/post-stop
+    caBundle: Cg==
+    poll:
+      url: http://1.1.1.1:8089/fetch-result
+      rawQueryKey: task-id
+      intervalSeconds: 5
+      timeoutSeconds: 60
+  failurePolicy: Fail
+  parameters:
+  - key: podIP
+    valueFrom:
+      fieldRef:
+        fieldPath: status.podIP
+```
+**Protocol without poll**
+
+Request:
+```json
+// URL: https://1.1.1.1:8089/post-stop
+// Method: POST
+
+{
+  "traceId": "",  // generated by Kuperator, can be used to track the request
+  "stage": "PreTrafficOff",
+  "ruleName": "webhookCheck",
+  "resources": [  // information of the Pods which are in this stage
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-a",
+      "parameters": {
+        "podIP": "1.0.0.1"  // customized information users can indicate via the rule parameters
+      }
+    },
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-b",
+      "parameters": {
+        "podIP": "1.0.0.2"
+      }
+    }
+  ]
+}
+```
+Response:
+```json
+{
+  "success": false,
+  "message": "msg",
+  "finishedNames": ["pod-a", "pod-b"]
+}
+```
+The `success` field indicates whether all pods are approved. If it is `false`, the `finishedNames` field can be used to approve a subset of the pods.
+
+**Protocol with poll**
+
+Request:
+```json
+// URL: https://1.1.1.1:8089/post-stop
+// Method: POST
+
+{
+  "traceId": "",  // generated by Kuperator, can be used to track the request
+  "stage": "PreTrafficOff",
+  "ruleName": "webhookCheck",
+  "resources": [  // information of the Pods which are in this stage
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-a",
+      "parameters": {
+        "podIP": "1.0.0.1"  // customized information users can indicate via the rule parameters
+      }
+    },
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-b",
+      "parameters": {
+        "podIP": "1.0.0.2"
+      }
+    }
+  ]
+}
+```
+
+Response:
+
+```json
+{
+  "success": true,
+  "poll": true,  // required to indicate that polling calls are necessary
+  "taskId": ,    // required to fetch the polling result
+  "message": "msg"
+}
+```
+The `success` field indicates whether the first request succeeded. If it is true and the field `poll` in the response is `true` (or the field `async` in the response is `true`), PodTransitionRule will then keep calling the poll URL to fetch the processing result.
+The field `taskId` is required for polling.
+
+The polling request uses the GET method, in the form `QueryUrl=URL?task-id=`. The parameter key in this URL defaults to `task-id` when `poll` is used in the above response; it would be `trace-id` when `async` is used instead.
+Users can also indicate the key with the field `poll.rawQueryKey`.
+
+The response to the polling call is expected to look like the following:
+
+```json
+{
+  "success": true,
+  "message": "msg",
+  "finished": false,
+  "finishedNames": ["pod-a", "pod-b"]
+}
+```
+
+`success` is supposed to be true if there is no error. If all pods are approved, `finished` should be `true`.
+If `finished` is `false`, `finishedNames` can be used to allow partial pods to be approved.
diff --git a/kuperator_versioned_docs/version-v0.3/manuals/resourceconsist.md b/kuperator_versioned_docs/version-v0.3/manuals/resourceconsist.md
new file mode 100644
index 00000000..19aa12f6
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.3/manuals/resourceconsist.md
@@ -0,0 +1,437 @@
+---
+sidebar_position: 2
+---
+
+# ResourceConsist
+[**ResourceConsist**](https://github.com/KusionStack/resourceconsist/blob/main/README.md) aims to make customized controllers easy to build, and offers controllers the ability to follow
+**PodOpsLifecycle**.
+
+## Tutorials
+**kusionstack.io/resourceconsist** mainly consists of the frame, experimental/adapters and adapters.
+
+The frame, ```kusionstack.io/resourceconsist/pkg/frame```, is used by adapters to start a controller, and it handles
+Reconcile and the Employer/Employees' spec & status. If you write an adapter in your own repo, you can import
+```kusionstack.io/resourceconsist/pkg/frame/controller``` and ```kusionstack.io/resourceconsist/pkg/frame/webhook```,
+and call AddToMgr to start a controller.
+
+>A webhookAdapter only needs to be implemented for controllers following PodOpsLifecycle.
+
+```go
+package main
+
+import (
+    controllerframe "kusionstack.io/resourceconsist/pkg/frame/controller"
+    webhookframe "kusionstack.io/resourceconsist/pkg/frame/webhook"
+)
+
+func main() {
+    controllerframe.AddToMgr(manager, yourOwnControllerAdapter)
+    webhookframe.AddToMgr(manager, yourOwnWebhookAdapter)
+}
+```
+### adapters
+The adapters, ```kusionstack.io/resourceconsist/pkg/adapters```, consist of built-in adapters. You can start a
+controller with built-in adapters simply by calling AddBuiltinControllerAdaptersToMgr and AddBuiltinWebhookAdaptersToMgr,
+passing the built-in adapters' names. Currently, an alibabacloudslb adapter has been released. You can use it as follows:
+```go
+import (
+    "kusionstack.io/resourceconsist/pkg/adapters"
+)
+
+func main() {
+    adapters.AddBuiltinControllerAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+    adapters.AddBuiltinWebhookAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+}
+```
+Built-in adapters can also be used the same way the frame is used: call NewAdapter from a specific built-in adapter pkg
+and then call frame.AddToMgr to start a controller/webhook.
+
+More built-in adapters will be implemented in the future. To keep this repo stable, all new built-in adapters will
+be added to ```kusionstack.io/resourceconsist/pkg/experimental/adapters``` first, and only moved to
+```kusionstack.io/resourceconsist/pkg/adapters``` once they are ready to be released.
+#### alibabacloudslb adapter
+```pkg/adapters/alibabacloudslb``` is an adapter that implements ReconcileAdapter. It follows **PodOpsLifecycle** to
+handle various scenarios during pod operations, such as creating a new pod, deleting an existing pod, or handling
+changes to pod configurations. This adapter ensures minimal traffic loss and provides a seamless experience for users
+accessing services load balanced by Alibaba Cloud SLB.
+
+In ```pkg/adapters/alibabacloudslb```, the real server is removed from SLB before pod operation in ACK. The LB
+management and real server management are handled by CCM in ACK. Since the alibabacloudslb adapter follows PodOpsLifecycle
+and the real servers are managed by CCM, ReconcileLifecycleOptions should be implemented. If the cluster is not in ACK or
+CCM is not working in the cluster, the alibabacloudslb controller should implement additional methods of ReconcileAdapter.
+### experimental/adapters
+The experimental/adapters package is more like a pre-release pkg for built-in adapters. Experimental/adapters are used in
+the same way as built-in adapters, but be aware: **DO NOT USE EXPERIMENTAL/ADAPTERS IN PRODUCTION**.
+### demo adapter
+A demo is implemented in ```resource_controller_suite_test.go```. In the demo controller, the employer is represented
+as a service and is expected to have the following **DemoServiceStatus**:
+```
+DemoServiceStatus{
+    EmployerId: employer.GetName(),
+    EmployerStatuses: DemoServiceDetails{
+        RemoteVIP:    "demo-remote-VIP",
+        RemoteVIPQPS: 100,
+    }
+}
+```
+The employee is represented as a pod and is expected to have the following **DemoPodStatus**:
+```
+DemoPodStatus{
+    EmployeeId:   pod.Name,
+    EmployeeName: pod.Name,
+    EmployeeStatuses: PodEmployeeStatuses{
+        Ip:             string,
+        Ipv6:           string,
+        LifecycleReady: bool,
+        ExtraStatus: PodExtraStatus{
+            TrafficOn:     bool,
+            TrafficWeight: int,
+        },
+    }
+}
+```
+The DemoResourceProviderClient is a fake client that handles backend provider resources related to the employer/employee
+(service/pods). In the Demo Controller, ```demoResourceVipStatusInProvider``` and ```demoResourceRsStatusInProvider```
+are mocked as resources in the backend provider.
+
+How the demo controller adapter is realized is introduced in detail below.
+```DemoControllerAdapter``` is defined with a Kubernetes client and a resourceProviderClient. What is included in
+the Adapter struct can be defined as needed.
+```go
+type DemoControllerAdapter struct {
+    client.Client
+    resourceProviderClient *DemoResourceProviderClient
+}
+```
+Declare that DemoControllerAdapter implements ```ReconcileAdapter``` and ```ReconcileLifecycleOptions```.
+Implementing ```ReconcileAdapter``` is mandatory, while ```ReconcileLifecycleOptions``` isn't; check the remarks
+for ```ReconcileLifecycleOptions``` in ```kusionstack.io/resourceconsist/pkg/frame/controller/types.go``` to find out why.
+```go
+var _ ReconcileAdapter = &DemoControllerAdapter{}
+var _ ReconcileLifecycleOptions = &DemoControllerAdapter{}
+```
+The following two methods implement ```ReconcileLifecycleOptions``` for DemoControllerAdapter, defining whether
+DemoControllerAdapter follows PodOpsLifecycle and needs to record employees.
+```go
+func (r *DemoControllerAdapter) FollowPodOpsLifeCycle() bool {
+    return true
+}
+
+func (r *DemoControllerAdapter) NeedRecordEmployees() bool {
+    return needRecordEmployees
+}
+```
+```IEmployer``` and ```IEmployee``` are interfaces that include several methods indicating the status of the employer and
+the employee.
+```go
+type IEmployer interface {
+    GetEmployerId() string
+    GetEmployerStatuses() interface{}
+    EmployerEqual(employer IEmployer) (bool, error)
+}
+
+type IEmployee interface {
+    GetEmployeeId() string
+    GetEmployeeName() string
+    GetEmployeeStatuses() interface{}
+    EmployeeEqual(employee IEmployee) (bool, error)
+}
+
+type DemoServiceStatus struct {
+    EmployerId       string
+    EmployerStatuses DemoServiceDetails
+}
+
+type DemoServiceDetails struct {
+    RemoteVIP    string
+    RemoteVIPQPS int
+}
+
+type DemoPodStatus struct {
+    EmployeeId       string
+    EmployeeName     string
+    EmployeeStatuses PodEmployeeStatuses
+}
+```
+```GetSelectedEmployeeNames``` returns all employees' names selected by the employer, in this case the pods' names selected by the service.
```GetSelectedEmployeeNames``` is used for ensuring LifecycleFinalizer and ExpectedFinalizer, so you can give +it an empty return if your adapter doesn't follow PodOpsLifecycle. +```go +func (r *DemoControllerAdapter) GetSelectedEmployeeNames(ctx context.Context, employer client.Object) ([]string, error) { + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + selected := make([]string, len(podList.Items)) + for idx, pod := range podList.Items { + selected[idx] = pod.Name + } + + return selected, nil +} +``` +```GetExpectedEmployer``` and ```GetCurrentEmployer``` defines what is expected under the spec of employer and what is +current status, like the load balancer from a cloud provider. Here in the demo adapter, expected is defined by hardcode +and current is retrieved from a fake resource provider ```demoResourceVipStatusInProvider```. +```go +func (r *DemoControllerAdapter) GetExpectedEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return nil, nil + } + var expect []IEmployer + expect = append(expect, DemoServiceStatus{ + EmployerId: employer.GetName(), + EmployerStatuses: DemoServiceDetails{ + RemoteVIP: "demo-remote-VIP", + RemoteVIPQPS: 100, + }, + }) + return expect, nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + var current []IEmployer + + req := &DemoResourceVipOps{} + resp, err := r.resourceProviderClient.QueryVip(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource vip query resp is nil") + } + + for _, employerStatus := range resp.VipStatuses { + current = append(current, employerStatus) + } + return current, nil +} +``` +```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles creation/update/deletion of resources related to employer on +related backend provider. Here in the demo adapter, ```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles +```demoResourceVipStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) CreateEmployer(ctx context.Context, employer client.Object, toCreates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + + toCreateDemoServiceStatus := make([]DemoServiceStatus, len(toCreates)) + for idx, create := range toCreates { + createDemoServiceStatus, ok := create.(DemoServiceStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreates employer is not DemoServiceStatus") + } + toCreateDemoServiceStatus[idx] = createDemoServiceStatus + } + + _, err := r.resourceProviderClient.CreateVip(&DemoResourceVipOps{ + VipStatuses: toCreateDemoServiceStatus, + }) + if err != nil { + return nil, toCreates, err + } + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployer(ctx context.Context, employer client.Object, toUpdates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoServiceStatus := make([]DemoServiceStatus, len(toUpdates)) + for idx, update := range toUpdates { + updateDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdates employer is not DemoServiceStatus") + } + toUpdateDemoServiceStatus[idx] = updateDemoServiceStatus + } + + _, err := r.resourceProviderClient.UpdateVip(&DemoResourceVipOps{ + VipStatuses: toUpdateDemoServiceStatus, + }) + if err != nil { + return nil, toUpdates, err + } + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployer(ctx context.Context, employer client.Object, toDeletes []IEmployer) ([]IEmployer, []IEmployer, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoServiceStatus := make([]DemoServiceStatus, len(toDeletes)) + for idx, update := range toDeletes { + deleteDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDeletes employer is not DemoServiceStatus") + } + toDeleteDemoServiceStatus[idx] = deleteDemoServiceStatus + } + + _, err := r.resourceProviderClient.DeleteVip(&DemoResourceVipOps{ + VipStatuses: toDeleteDemoServiceStatus, + }) + if err != nil { + return nil, toDeletes, err + } + return toDeletes, nil, nil +} +``` +```GetExpectedEmployee```and```GetCurrentEmployee``` defines what is expected under the spec of employer and employees +and what is current status, like real servers under the load balancer from a cloud provider. Here in the demo adapter, +expected is calculated from pods and current is retrieved from a fake resource provider ```demoResourceRsStatusInProvider```. 
+```go +// GetExpectEmployeeStatus return expect employee status +func (r *DemoControllerAdapter) GetExpectedEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return []IEmployee{}, nil + } + + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + expected := make([]IEmployee, len(podList.Items)) + expectIdx := 0 + for _, pod := range podList.Items { + if !pod.DeletionTimestamp.IsZero() { + continue + } + status := DemoPodStatus{ + EmployeeId: pod.Name, + EmployeeName: pod.Name, + } + employeeStatuses, err := GetCommonPodEmployeeStatus(&pod) + if err != nil { + return nil, err + } + extraStatus := PodExtraStatus{} + if employeeStatuses.LifecycleReady { + extraStatus.TrafficOn = true + extraStatus.TrafficWeight = 100 + } else { + extraStatus.TrafficOn = false + extraStatus.TrafficWeight = 0 + } + employeeStatuses.ExtraStatus = extraStatus + status.EmployeeStatuses = employeeStatuses + expected[expectIdx] = status + expectIdx++ + } + + return expected[:expectIdx], nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + var current []IEmployee + req := &DemoResourceRsOps{} + resp, err := r.resourceProviderClient.QueryRealServer(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource rs query resp is nil") + } + + for _, rsStatus := range resp.RsStatuses { + current = append(current, rsStatus) + } + return current, nil +} +``` +```CreateEmployees/UpdateEmployees/DeleteEmployees``` handles creation/update/deletion of resources related to employee +on related backend provider. Here in the demo adapter, ```CreateEmployees/UpdateEmployees/DeleteEmployees``` +handles ```demoResourceRsStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) CreateEmployees(ctx context.Context, employer client.Object, toCreates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + toCreateDemoPodStatuses := make([]DemoPodStatus, len(toCreates)) + + for idx, toCreate := range toCreates { + podStatus, ok := toCreate.(DemoPodStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreate is not DemoPodStatus") + } + toCreateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.CreateRealServer(&DemoResourceRsOps{ + RsStatuses: toCreateDemoPodStatuses, + }) + if err != nil { + return nil, toCreates, err + } + + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployees(ctx context.Context, employer client.Object, toUpdates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoPodStatuses := make([]DemoPodStatus, len(toUpdates)) + + for idx, toUpdate := range toUpdates { + podStatus, ok := toUpdate.(DemoPodStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdate is not DemoPodStatus") + } + toUpdateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.UpdateRealServer(&DemoResourceRsOps{ + RsStatuses: toUpdateDemoPodStatuses, + }) + if err != nil { + return nil, toUpdates, err + } + + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployees(ctx context.Context, employer client.Object, toDeletes []IEmployee) ([]IEmployee, []IEmployee, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoPodStatuses := make([]DemoPodStatus, len(toDeletes)) + + for idx, toDelete := range toDeletes { + podStatus, ok := toDelete.(DemoPodStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDelete is not DemoPodStatus") + } + toDeleteDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.DeleteRealServer(&DemoResourceRsOps{ + RsStatuses: toDeleteDemoPodStatuses, + }) + if err != nil { + return nil, toDeletes, err + } + + return toDeletes, nil, nil +} +``` diff --git a/kuperator_versioned_docs/version-v0.3/started/_category_.json b/kuperator_versioned_docs/version-v0.3/started/_category_.json new file mode 100644 index 00000000..877a378f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 2 +} diff --git a/kuperator_versioned_docs/version-v0.3/started/demo-graceful-operation.md b/kuperator_versioned_docs/version-v0.3/started/demo-graceful-operation.md new file mode 100644 index 00000000..6eb1fce9 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/started/demo-graceful-operation.md @@ -0,0 +1,340 @@ +# Using KusionStack Kuperator to operate Pods gracefully + +Applications always provide its service along with traffic routing. +On Kubernetes, they should be a set of Pods and a corresponding Kubernetes Service resource to expose the service. + +However, during operations such as updating Pod revisions, +there is a risk that client request traffic may be lost. This can lead to a poor user experience for developers. + +This tutorial will demonstrate how to operate Pods gracefully in a KusionStack Kuperator way on Aliyun ACK +with SLB as a Service backend provider. 
+ +> You can also get the same point from [this video](https://www.bilibili.com/video/BV1n8411q7sP/?t=15.7), +> which shows the same case using both KusionStack Kusion and Kuperator. +> The sample used in this video can be found from [KusionStack Catalog](https://github.com/KusionStack/catalog/tree/main/models/samples/wordpress). + +## Preparing + +First, ensure that you have an Aliyun ACK Kubernetes cluster set up in order to provision an Aliyun SLB. + +Next, install KusionStack Kuperator on this Kubernetes cluster +following [installation doc](https://kusionstack.io/docs/kuperator/started/install). + +## Get started + +### Create a new namespace + +To begin, create a new namespace for this tutorial: + +```shell +$ kubectl create ns kuperator-tutorial +``` + +### Provision Pods and Services + +You can create a set of Pods to run up a demo application service +by creating CollaSet resource using following command: + +``` shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.3 + name: server + command: + - /server + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +There should be 3 Pods created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +server-c5lsr 1/1 Running 0 2m23s +server-p6wrx 1/1 Running 0 2m23s +server-zn62c 1/1 Running 0 2m23s +``` + +Then create a Kubernetes Service by running following command, +which will provision Aliyun SLB to expose service. + +```shell +echo ' +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small + service.beta.kubernetes.io/backend-type: eni + labels: + kusionstack.io/control: "true" # this label is required + name: server +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: server + type: LoadBalancer +' | kubectl -n kuperator-tutorial apply -f - +``` + +A service with external IP should be provisioned. + +```shell +$ kubectl -n kuperator-tutorial get svc server +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +server LoadBalancer 192.168.225.55 47.101.49.182 80:30146/TCP 51s +``` + +The label `kusionstack.io/control: "true"` on Service is very important. +It means this service resource will be recognized by ResourceConsist framework, and then participate in PodOpsLifecycle +to control the Aliyun SLB to switch off traffic before updating each Pod and switch on traffic after it finished, +in order to protect the service. + +### Provision a client + +Then we will provision a client to access the service we created before. +Please replace `` in the following CollaSet yaml with the external IP from Kubernetes Service created above, and apply again. 
+ +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: client +spec: + replicas: 1 + selector: + matchLabels: + app: client + template: + metadata: + labels: + app: client + spec: + containers: + - image: wu8685/echo:1.3 + name: nginx + command: + - /client + args: + - -url + - http:///echo # EXTERNAL_IP should be replaced + - -m + - POST + - d + - kuperator-tutorial + - -qps + - "10" + - -worker + - "10" + - -timeout + - "10000" + resources: + limits: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi +' | kubectl -n kuperator-tutorial apply -f - +``` + +A client Pod should be created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-nc426 1/1 Running 0 30s +server-c5lsr 1/1 Running 0 19m +server-p6wrx 1/1 Running 0 19m +server-zn62c 1/1 Running 0 19m +``` + +This client will continuously access the service using the configuration provided in the command. +You can monitor the response codes from its logs: + +```shell +kubectl -n kuperator-tutorial logs -f client-nc426 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +``` + +The accesses are all successful. + +### Update Pod revision + +To trigger a Pod revision update, run the following command +to edit the container image and command in the PodTemplate of CollaSet: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.2 + name: server + command: + - /app/echo + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +It will trigger all Pods updated simultaneously. So the application `server` has no Pod to serve. +We can observe the error from client logs. 
+ +```shell +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:54040->47.101.49.182:80: read: connection reset by peer +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:34438->47.101.49.182:80: read: connection reset by peer +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-1 another loop, request: 20, failed: 3 +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-0 another loop, request: 20, failed: 3 +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +``` + +### Provision PodTransistionRule + +To avoid this problem, provision a PodTransitionRule with a maxUnavailable 50% rule by running the following command: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + labels: + name: server +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + selector: + matchLabels: + app: server +' | kubectl -n kuperator-tutorial apply -f - +``` + +After updating the CollaSet of the server to trigger an update, you will see the Pods rolling update one by one, +ensuring that at least one Pod is always available to serve. + +```shell +kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-rrfbj 1/1 Running 0 25s +server-457sn 0/1 Running 0 5s +server-bd5sz 0/1 Running 0 5s +server-l842s 1/1 Running 0 2m4s +``` + +You can see from the client logs that no access requests fail during this update. + +```shell +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +``` + +### Clean tutorial namespace + +At the end of this tutorial, you can clean up the resources by deleting the namespace: + +```shell +$ kubectl delete ns kuperator-tutorial +``` + +## Comparison with the Native Approach + +Kubernetes provides `preStop` and `postStart` hook in each container, by which users can also interact with service outside +Kubernetes like Aliyun SLB service. However, KusionStack Kuperator offers several advantages: + +* Pod level vs Container level + +Kuperator offers a Pod level hooks which have more complete information than one container, +especially there are several containers in one Pod. + +* Plugin-able + +Through KusionStack Kuperator, you can decouple operations executed before or after Pods actually change. 
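+For contrast, the native approach couples this logic to each container spec, roughly as sketched below; the deregistration script is purely hypothetical and not something Kuperator ships.
+```yaml
+# Native approach (illustrative): traffic removal is baked into the Pod spec itself.
+apiVersion: v1
+kind: Pod
+metadata:
+  name: server
+spec:
+  containers:
+    - name: server
+      image: wu8685/echo:1.3
+      lifecycle:
+        preStop:
+          exec:
+            command: ["/scripts/deregister-from-slb.sh"]   # hypothetical script calling the SLB API
+```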
+For example, traffic control can be added or removed without modifying the Pod's preStop configuration. + +* Rollback option + +In case of issues, rollback becomes a viable option when using the Kuperator approach to update Pods. +Since Kuperator does not modify the Pods or their containers during the update, +if the traffic service experiences problems, there is an opportunity to cancel the update. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.3/started/install.md b/kuperator_versioned_docs/version-v0.3/started/install.md new file mode 100644 index 00000000..1ac6a89c --- /dev/null +++ b/kuperator_versioned_docs/version-v0.3/started/install.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 2 +--- + +# Installation + +## Install with helm +KusionStack Kuperator requires **Kubernetes version >= 1.18** +```shell +# Firstly add charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install kuperator kusionstack/kuperator +``` + + +[Helm](https://github.com/helm/helm) is a tool for managing packages of pre-configured Kubernetes resources. +### Optional: chart parameters + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|-------------|----------------|----------------| +| `namespace` | namespace for Kuperator installation | `kusionstack-system` | +| `namespaceEnabled` | Whether to create the installation.namespace | `true` | +| `managerReplicas`| Replicas of Kuperator deployment | `3` | +| `image.repo` | Repository for kuperator image | `kusionstack/kuperator`| +| `image.pullPolicy`| Image pull policy for kuperator-manager container | `IfNotPresent` | +| `image.tag` | Tag for kuperator-manager image | `v0.1.0` | +| `resources.limits.cpu` | CPU resource limit of kuperator-manager container | `500m` | +| `resources.limits.memory` | Memory resource limit of kuperator-manager container | `128Mi` | +| `resources.requests.cpu` | CPU resource request of kuperator-manager container | `10m` | +| `resources.requests.memory` | Memory resource request of kuperator-manager container | `64Mi` | + +### Upgrade + +Run following command to upgrade KusionStack Kuperator to the latest version. + +```shell +# Upgrade to the latest version +$ helm upgrade kuperator kusionstack/kuperator +``` + +### Uninstall + +Run following command to uninstall KusionStack Kuperator. + +```shell +# Uninstall +$ helm uninstall kuperator +``` \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.4/concepts/_category_.json b/kuperator_versioned_docs/version-v0.4/concepts/_category_.json new file mode 100644 index 00000000..1d3167d4 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 3 +} diff --git a/kuperator_versioned_docs/version-v0.4/concepts/podopslifecycle.md b/kuperator_versioned_docs/version-v0.4/concepts/podopslifecycle.md new file mode 100644 index 00000000..88068133 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/concepts/podopslifecycle.md @@ -0,0 +1,232 @@ +--- +sidebar_position: 2 +--- + +# PodOpsLifecycle + +## Background + +Kubernetes provides a set of default controllers for workload management, such as StatefulSet, Deployment, and DaemonSet, which are responsible for Pod operations. 
+Meanwhile, application users may also have some services outside the Kubernetes cluster that are closely related to the Pod Lifecycle, including traffic routing, service discovery, or alert monitoring. +However, they face challenges in participating in the operational lifecycle of a Pod, even if they are connected to Kubernetes by developing a controller that watches the Pods. + +PodOpsLifecycle aims to offer Kubernetes administrators and developers finer-grained control over the entire lifecycle of a Pod. +It enables developers to execute necessary actions before, during, and after specific phases of a Pod operation. +For instance, removing the Pod's IP from the traffic route before initiating the Pod operation, performing the actual Pod operations, and adding it back after the Pod operation is completed to achieve a smooth and graceful Pod operation, and prevent any traffic loss. + +## Introduction + +In PodOpsLifecycle, participants are classified into two roles: `operation controllers` and `cooperation controllers`. +- **Operation controllers** are responsible for operating Pods, such as Deployments and StatefulSets from Kubernetes, and CollaSets from Kuperator which intend to scale, update, or recreate Pods. +- **Cooperation controllers** are sensitive with Pod status. They handle resources or configurations around Pods, which may include traffic controller, alert monitoring controller, etc. These controllers typically reconcile Kubernetes resources around Pods with external services, such as sync Pod IPs with the LB provider, or maintaining Pods' metadata with application monitoring system. + +The two types of controllers do not need to be aware of each other. All controllers are organized by PodOpsLifecycle. Additionally, KusionStack Kuperator introduces extra phases around the native Kubernetes Pod Lifecycle: ServiceAvailable, Preparing, and Completing. + +![pod-ops-lifecycle](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png) + +- **Completing**: After a Pod is created or updated and becomes ready, Kuperator marks its PodOpsLifecycle as the `Completing` phase. During this phase, the Pod is in a ready condition, prompting cooperation controllers to perform actions such as registering the Pod IP in the traffic route. Once all cooperation controllers complete their tasks, Kuperator sets the PodOpsLifecycle to the `ServiceAvailable` phase. +- **ServiceAvailable**: This phase indicates that the Pod is in a normal state and ready to serve. If everything goes smoothly, the Pod remains in the `ServiceAvailable` phase until the next operation. +- **Preparing**: When an operation controller needs to operate the Pod, it triggers a new PodOpsLifecycle. The Pod then transitions from the `ServiceAvailable` phase to the `Preparing` phase. During this phase, the Pod is initially marked as Unready by setting ReadinessGate to false. All cooperation controllers then begin preparing tasks, such as removing the Pod's IP from the traffic route. After completing these tasks, the Pod enters the `Operating` phase. +- **Operating**: If a Pod enters the `Operating` phase, it is expected to accept any kind of operation without any damage, including recreation, scaling-in, upgrading, etc. Operation controllers are permitted to apply any changes to this Pod. Once all these operations are completed, the Pod advances to the next phase — `Completing`, and the PodOpsLifecycle continues. 
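+Concretely, PodOpsLifecycle surfaces these transitions on the Pod itself through a readiness gate: the Pod is only reported Ready once Kuperator sets the gate condition to True. Below is a minimal sketch of a participating Pod spec; the gate name shown is the service-ready gate commonly used by Kuperator and should be treated as an assumption to verify against your installed version.
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    kusionstack.io/control: "true"        # opt this Pod into PodOpsLifecycle management
+spec:
+  readinessGates:
+    - conditionType: pods.kusionstack.io/service-ready   # assumed gate name, set by Kuperator
+  containers:
+    - name: app
+      image: nginx:1.25.2
+```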
+ +The PodOpsLifecycle detail and the relationship with Kubernetes native Pod Lifecycle is showed by following sequence diagram. + +![pod-ops-lifecycle-sequence-diagram](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png) + +## Developer's Guide + +This section introduces how to develop operation controllers and cooperation controllers to interact with PodOpsLifecycle. +- The operation controller is responsible for a set of Pod operation tasks. KusionStack Kuperator has already provided various types of operation controllers. Users only need to develop a new operation controller if a new kind of Pod operation needs to be added. +- The cooperation controller participates in PodOpsLifecycle before and after operating on a Pod, such as the Traffic controller, alert monitoring controller, and other controllers responsible for maintaining the Pod and application status. Users should develop a new cooperation controller only when there is a new type of service or status around the Pod that needs to be maintained, such as integrating with a new traffic provider. + +### Operation Controller + +The operation controller is responsible for Pod operations. The tasks that an operation controller needs to perform during PodOpsLifecycle include triggering a PodOpsLifecycle, checking whether the Pod has entered the Operating phase, performing Pod operations, and marking Pod operations as finished. These actions interacting with PodOpsLifecycle are provided in the package `kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle/utils.go`. + +A simple operation controller reconcile method would look like this: + +```go +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/client" + + "kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle" +) + +var operationAdapter = &OperationOpsLifecycleAdapter{} + +type OperationOpsLifecycleAdapter struct { +} + +// GetID indicates ID of the PodOpsLifecycle +func (a *OperationOpsLifecycleAdapter) GetID() string { + return "new-id" +} + +// GetType indicates type for this Operation Controller +func (a *OperationOpsLifecycleAdapter) GetType() podopslifecycle.OperationType { + return "new-type" +} + +// AllowMultiType indicates whether multiple IDs which have the same Type are allowed +func (a *OperationOpsLifecycleAdapter) AllowMultiType() bool { + return true +} + +// WhenBegin is a hook, which will be executed when begin a lifecycle +func (a *OperationOpsLifecycleAdapter) WhenBegin(pod client.Object) (bool, error) { + return false, nil +} + +// WhenFinish is a hook, which will be executed when finish a lifecycle +func (a *OperationOpsLifecycleAdapter) WhenFinish(pod client.Object) (bool, error) { + return false, nil +} + +...... 
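+// The Reconcile method below walks a Pod through one operation round:
+//   1. Begin    - trigger a new PodOpsLifecycle for this operation type;
+//   2. AllowOps - wait until the Pod has reached the Operating phase;
+//   3. operate  - perform the actual Pod operation;
+//   4. Finish   - mark the operation done so the lifecycle can move on to Completing.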
+func (r *PodOperationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (ctrl.Result, error) { + // get the Pod + pod := &corev1.Pod{} + if err := r.Get(ctx, req.NamespacedName, pod); err != nil { + if !errors.IsNotFound(err) { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil + } + + // check if the Pod needs operation + if !r.needOperation(pod) { + return reconcile.Result{}, nil + } + + // if PodOpsLifecycle has not been triggered, trigger it + if !podopslifecycle.IsDuringOps(OpsLifecycleAdapter, pod) { + if updated, err := podopslifecycle.Begin(r, operationAdapter, pod); err != nil { + return reconcile.Result{}, err + } else if updated { + return reconcile.Result{}, nil + } + } + + // waiting until Pod enters operating phase + if _, allowed := podopslifecycle.AllowOps(operationAdapter, 0, pod); !allowed { + return reconcile.Result{}, nil + } + + // do operation works + if completed := r.doPodOperation(pod); !completed { + return reconcile.Result{}, nil + } + + // after operation works completed, finish operating phase to continue PodOpsLifecycle + if _, err := podopslifecycle.Finish(r, operationAdapter, pod); err != nil { + return reconcile.Result{}, err + } +} +``` + +### Pod Cooperation Controller + +There are two ways to develop a cooperation controller. +One way is to develop a controller using the controller runtime and adhering to some conventions of PodOpsLifecycle and Kubernetes. +Another way is to take the use of [ResourceConsist](https://github.com/KusionStack/resourceconsist) framework provided by KusionStack, which can be referenced from its [documentation](https://www.kusionstack.io/docs/kuperator/manuals/resourceconsist). + +The following outlines the first approach. + +```go +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + k8spod "k8s.io/kubernetes/pkg/api/v1/pod/util.go" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + appsv1alpha1 "kusionstack.io/kuperator/apis/apps/v1alpha1" +) + +const ( + // Finalizer needs to have prefix: `prot.podopslifecycle.kusionstack.io`. + // KusionStack Kuperator keeps this prefix back-compatible, + // so that it can be hard code to decouple with KusionStack Kuperator. + finalizerPrefix = appsv1alpha1.PodOperationProtectionFinalizerPrefix + + protectionFinalizer = finalizerPrefix + "/" + "unique-id" +) + +...... 
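+// A cooperation controller's contract with PodOpsLifecycle, as sketched below:
+// - while the Pod is Ready, finish the "traffic on" work (e.g. add its IP to the
+//   traffic route) and then add the protection finalizer;
+// - while the Pod is not Ready (an operation is being prepared), finish the
+//   "traffic off" work and then remove the finalizer, signalling that this
+//   controller no longer blocks the operation.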
+func (r *PodResourceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + // get the Pod + pod := &corev1.Pod{} + if err := r.Get(ctx, req.NamespacedName, pod); err != nil { + if !errors.IsNotFound(err) { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil + } + + if k8spod.IsPodReady(pod) { + // do resource reconcile like add Pod IP to traffic route + r.trafficOn(pod.status.PodIP) + // It is important to add a unique finalizer on this Pod + return reconcile.Result{}, r.addFinalizer(ctx, pod, protectionFinalizer) + } + + if !k8spod.IsPodReady(pod) { + // do resource reconcile like remove Pod IP from traffic route + r.trafficOff(pod.status.PodIP) + // It is important to remove the unique finalizer from this Pod + return reconcile.Result{}, r.removeFinalizer(ctx, pod, protectionFinalizer) + } +} + +func (r *PodResourceReconciler) addFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error { + if controllerutil.ContainsFinalizer(pod, finalizer) { + return nil + } + + controllerutil.AddFinalizer(pod, finalizer) + return r.Update(ctx, pod) +} + +func (r *PodResourceReconciler) removeFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error { + if !controllerutil.ContainsFinalizer(pod, finalizer) { + return nil + } + + controllerutil.RemoveFinalizer(pod, finalizer) + return r.Update(ctx, pod) +} +``` + +## Key Features + +### Concurrency Support + +PodOpsLifecycle in KusionStack Kuperator supports concurrency. +It means PodOpsLifecycle is able to organize and track multi controllers operating the same pod at the same time. +For example, when a controller is going to update Pod, other controllers are allowed to do other operations at the same time, like delete, restart, recreate it, +although the result may not be meaningful. + +### General Workload Support + +PodOpsLifecycle offers seamless integration with various workload types, including Deployment and StatefulSet. +To enable this functionality, ensure the feature gate for `GraceDeleteWebhook` is enabled when starting the KusionStack Kuperator controller: + +```shell +# Enable the GraceDeleteWebhook feature when starting the controller with this argument +$ /manager --feature-gates=GraceDeleteWebhook=true +``` + +Once enabled, any Pod labeled with `kusionstack.io/control=true` under a general workload, such as Deployment, becomes manageable by PodOpsLifecycle. +This feature provides workloads a way to work closer with Pod Cooperation Controllers. + +> Due to the Kubernetes webhook mechanism, the following error will be returned when workloads or users delete a pod. This error is intentional and serves to indicate that the pod deletion process has started and is being managed by PodOpsLifecycle. 
+> ```shell
+> $ kubectl -n default delete pod collaset-sample-74fsv
+> Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle
+> ```
\ No newline at end of file
diff --git a/kuperator_versioned_docs/version-v0.4/introduction/_category_.json b/kuperator_versioned_docs/version-v0.4/introduction/_category_.json
new file mode 100644
index 00000000..537bad9b
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.4/introduction/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Introduction",
+  "position": 0
+}
diff --git a/kuperator_versioned_docs/version-v0.4/introduction/introduction.md b/kuperator_versioned_docs/version-v0.4/introduction/introduction.md
new file mode 100644
index 00000000..5adf228c
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.4/introduction/introduction.md
@@ -0,0 +1,49 @@
+# What is KusionStack Kuperator?
+
+KusionStack Kuperator consists of workloads and operators built on Kubernetes Custom Resource Definitions,
+with a primary aim of bridging the gap between platform development and Kubernetes.
+
+By keeping more operational work inside the Kubernetes layer,
+KusionStack Kuperator reduces complexity when interacting with Kubernetes
+and enhances convenience for platform developers.
+
+## Key features
+
+KusionStack Kuperator currently provides the following features,
+streamlining application operations when developing platforms based on Kubernetes:
+
+### Fine-grained operation
+
+KusionStack Kuperator introduces PodOpsLifecycle to extend the native Pod lifecycle with additional phases such as PreCheck, Preparing, etc.
+All operators within KusionStack Kuperator respect PodOpsLifecycle,
+so that PodOpsLifecycle can orchestrate all of these operators to operate each Pod in a coordinated way.
+
+### Advanced workloads
+
+KusionStack Kuperator offers several workloads to make it convenient and effective to deliver and operate application resources.
+
+Currently, Kuperator provides the workload CollaSet.
+Besides the basic ability to scale and update Pods like Kubernetes Deployment and StatefulSet,
+CollaSet also provides a range of scale and update strategies,
+such as in-place update of container images and pod revision consistency.
+
+### Streamlined Pod Operation
+
+KusionStack Kuperator introduces the ResourceConsist framework, which offers a graceful way
+to integrate resource management around Pods, including traffic control, into the PodOpsLifecycle.
+This simplifies the work for platform developers dealing with Pod operation details.
+KusionStack also integrates some resources by default, such as Aliyun SLB.
+
+### Risk management
+
+Building upon the PodOpsLifecycle, KusionStack Kuperator introduces the workload PodTransitionRule,
+which keeps the risks of Pod operations under control.
+By providing a MaxUnavailable rule similar to Kubernetes' PodDisruptionBudget (PDB),
+it ensures there are always enough Pods available for service.
+Furthermore, it allows for custom rules through extension via webhooks and label hooks.
+
+## Future works
+
+The KusionStack Kuperator project is currently in its early stages.
+Our goal is to simplify platform development. We will continue building in areas such as application operations,
+observability, and insight.
We hope the Kuperator will make it easier for you to build platforms. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.4/manuals/_category_.json b/kuperator_versioned_docs/version-v0.4/manuals/_category_.json new file mode 100644 index 00000000..795f138a --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/manuals/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Manuals", + "position": 4 +} diff --git a/kuperator_versioned_docs/version-v0.4/manuals/collaset.md b/kuperator_versioned_docs/version-v0.4/manuals/collaset.md new file mode 100644 index 00000000..620fb9c6 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/manuals/collaset.md @@ -0,0 +1,972 @@ +--- +sidebar_position: 1 +--- + +# CollaSet +CollaSet is responsible for managing a set of Pods. Similar to Kubernetes Deployment and StatefulSet, it also supports scaling and updating Pods. Additionally, CollaSet offers advanced features to provide users with more granular control over managing Pods. + +A basic CollaSet configuration is represented in the following YAML format: + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` +Let's explore the features of CollaSet. + +## Basic Features +### Scaling Pods +CollaSet utilizes the field spec.replicas to indicate the number of Pods under management. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 3 # indicate the number of Pods to manage + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +... +``` +Pods can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-85q7g 1/1 Running 0 57s +collaset-sample-vx5ws 1/1 Running 0 57s +collaset-sample-hr7pv 1/1 Running 0 57s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +By default, CollaSet always creates new Pods using the latest template specified in `spec.template`. CollaSet establishes ownership over a set of Pods through the label selector defined in `spec.selector`. Thus, it's important to ensure that the labels provided in `spec.selector` match those in `spec.template.metadata.labels`. + +CollaSet status provides general information about this CollaSet and all Pods under it. + +``` shell +$ kubectl -n default get cls collaset-sample -o yaml +...... +status: + availableReplicas: 3 + collisionCount: 0 + conditions: + - lastTransitionTime: "2023-09-01T03:56:09Z" + reason: Updated + status: "True" + type: Update + currentRevision: collaset-sample-6d7b7c58f + observedGeneration: 1 + operatingReplicas: 0 + readyReplicas: 3 + replicas: 3 + scheduledReplicas: 3 + updatedAvailableReplicas: 3 + updatedReadyReplicas: 3 + updatedReplicas: 3 + updatedRevision: collaset-sample-6d7b7c58f +``` + +Some fields in CollaSet status are explained here: + +`updatedRevision` indicates the latest revision that CollaSet uses to create or update Pods. 
+ +`currentRevision` indicates the last updated revision. It will be set to updatedRevision after all Pods are updated, and their PodReady conditions become True. + +`replicas` indicates the count of Pods under this CollaSet. + +`scheduledReplicas` indicates the count of Pods under this CollaSet that successfully got scheduled. + +`availableReplicas` indicates the count of Pods under this CollaSet that have all expected finalizers attached. + +`updatedReplicas` indicates the count of Pods under this CollaSet that have the updated revision. + +`updatedReadyReplicas` indicates the count of Pods under this CollaSet that are counted in `updatedReplicas` and have their PodReady conditions set to True. + +`updatedAvailableReplicas` indicates the count of Pods under this CollaSet that is counted in `updatedReadyReplicas` and have all expected finalizers attached. + +### Updating Pods +CollaSet generates Pods according to the pod template described in `spec.template`. This template can be updated to signal CollaSet to update each owned Pod: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: +...... +spec: +...... + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 +...... +``` + +CollaSet immediately updates all Pods it owns with the new Pod template by default. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +The update progress can be controlled using partition. + +#### Partition +Similar to StatefulSet, `partition` is used to control the upgrade progress. + +By default, if not indicated, all Pods will be updated when spec.template changes. The `partition` can be adjusted from 0 to `spec.replicas` to specify how many Pods CollaSet should update. **Unlike StatefulSet, the partition in CollaSet represents the number of Pods to update.** + +Let's update the image back to nginx:1.25.2: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.25.2 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # use partition to control upgrade progress +``` + +In this case, CollaSet only updates 1 Pod to the updated revision. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.25.2 # only 1 Pod updated + - image: nginx:1.24.0 +``` + +#### Update by Label +By configuring the `byLabel` rolling update policy, users can precisely specify which Pods they want to update by using labels. + +If you go back to the sample in the [section Partition](#Partition) and change `byPartition` to `byLabel` like the following: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + rollingUpdate: +- byPartition: +- partition: 1 ++ byLabel: {} +``` + +Subsequently, each Pod will only be updated if it's marked with the label `collaset.kusionstack.io/update-included`. + +## Advanced Features +### Pod Instance ID +Each Pod created by CollaSet has a unique ID held by the label `collaset.kusionstack.io/instance-id`, which can be used to identify each individual Pod. 
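+This makes it easy to address one specific replica, for example when debugging; the namespace and ID below are placeholders:
+```shell
+# List the Pod holding instance ID 0 under the sample CollaSet (illustrative).
+$ kubectl -n default get pod -l app=foo,collaset.kusionstack.io/instance-id=0
+```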
+ +``` yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + collaset.kusionstack.io/instance-id: "0" # Pod instance ID +... +``` + +CollaSet provides a context to specify an ID pool, which defaults to the same name as the CollaSet and is immutable. + +``` yaml +... +spec: + scaleStrategy: + context: +``` + +The same ID pool name can be indicated for multiple CollaSets, allowing them to share a single ID pool. Consequently, each Pod created by these CollaSets will be assigned a unique ID. + +For example, these are two CollaSets with the same context: + +``` shell +$ cat ~/sample.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-a +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +--- + +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-b +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` + +Then create these CollaSets with the sample file: + +``` shell +$ kubectl -n default apply -f ~/sample.yaml +collaset.apps.kusionstack.io/collaset-sample-a created +collaset.apps.kusionstack.io/collaset-sample-b created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-a-g4sjj 1/1 Running 0 42s +collaset-sample-a-ph9vc 1/1 Running 0 42s +collaset-sample-b-fqkq4 1/1 Running 0 42s +collaset-sample-b-lqg8f 1/1 Running 0 42s + +$ kubectl -n default get pod -o yaml | grep collaset.kusionstack.io/instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "3" + collaset.kusionstack.io/instance-id: "2" +``` + +Now, the 4 Pods created by these 2 CollaSets will have a unique instance ID. + +### Revision Consistency +Pods within a CollaSet can utilize more than two different Pod templates simultaneously, including both the current and updated revisions. This can result from partial updates. To ensure the stability of Pod revisions over time, CollaSet records this information. When a Pod is deleted, CollaSet recreates it using its previous revision. + +It can be reproduced by following steps: + +1. Provision a new CollaSet with replicas 3. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 4s +collaset-sample-glgnb 1/1 Running 0 4s +collaset-sample-qs46r 1/1 Running 0 4s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +2. Update the image of PodTemplate of the CollaSet to image nginx:1.24.0 and set the partition to 2. Then there will be 2 Pods with image nginx:1.24.0 and 1 Pod with image nginx:1.25.2. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... 
+ updateStrategy: + rollingUpdate: + byPartition: + partition: 2 # update 2 Pods + +# Wait until these 2 Pods are updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +3. Update the image of PodTemplate of the CollaSet to image nginx:1.23.4 and set the partition to 1. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.23.4 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # update 1 Pod + +# Wait until the Pod is updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # Pod collaset-sample-qs46r + - image: nginx:1.23.4 +``` + +Now, there are 3 Pods, each of which has an individual image. If we then delete the Pod with the image nginx:1.24.0, the new Pod replacing it will be created with the same image nginx:1.24.0 in order for the Pod to inherit the revision. + +``` shell +$ kubectl delete -n default delete pod collaset-sample-qs46r +pod "collaset-sample-qs46r" deleted + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 3h +collaset-sample-ht9x6 1/1 Running 0 2m27s # Pod recreated +collaset-sample-qs46r 1/1 Running 1 (3h ago) 3h + +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # image has not been changed + - image: nginx:1.23.4 +``` + +### In-Place Update Pod +In addition to the `Recreate` update policy, which is identical to Deployment and StatefulSet, CollaSet offers the `InPlaceIfPossible` update policy. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # Options: InPlaceIfPossible, Recreate, Replace +``` + +`InPlaceIfPossible` is the default value, which instructs CollaSets to try to update Pods in place when only container images, labels, and annotations have changed. If some other fields have changed too, the policy will back off to the `Recreate` policy. + +`Recreate` indicates CollaSets always delete the old Pod and create a new one with an updated revision. + +If update pod template with `InPlaceIfPossible` policy as following example, the Pod will not be recreated. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # use InPlaceIfPossible policy + +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5wvlh 1/1 Running 1 (6s ago) 2m10s +collaset-sample-ldvrg 1/1 Running 1 (6s ago) 2m10s +collaset-sample-pbz75 1/1 Running 1 (6s ago) 2m10s +``` + +### Replace Update Pod + +CollaSet provides the `Replace` policy for certain applications that are sensitive to the available number of Pods. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... 
+  updateStrategy:
+    podUpgradePolicy: Replace # Options: InPlaceIfPossible, Recreate, Replace
+```
+
+The `Replace` policy indicates that CollaSet should update a Pod by creating a new one to replace it.
+Unlike the `Recreate` policy, which deletes the old Pod before creating a new updated one, or the `InPlaceIfPossible` policy, which updates the current Pod in place,
+the `Replace` policy first creates a new Pod with the updated revision. It then deletes the old Pod once the new one becomes available for service.
+
+```shell
+# Before updating CollaSet
+$ kubectl -n default get pod
+NAME                    READY   STATUS    RESTARTS   AGE
+collaset-sample-dwkls   1/1     Running   0          6m55s
+
+# After updating CollaSet, the updated Pod is created first
+$ kubectl -n default get pod
+NAME                    READY   STATUS              RESTARTS   AGE
+collaset-sample-dwkls   1/1     Running             0          6m55s
+collaset-sample-rcmbv   0/1     ContainerCreating   0          0s
+
+# Once the created Pod is available for service, the old Pod will be deleted
+$ kubectl -n default get pod
+NAME                    READY   STATUS        RESTARTS   AGE
+collaset-sample-rcmbv   1/1     Running       0          1s
+collaset-sample-dwkls   1/1     Terminating   0          7m12s
+```
+
+The two Pods will have a pair of labels to identify their relationship. The new Pod will have the label `collaset.kusionstack.io/replace-pair-origin-name` to indicate the name of the old Pod, and the old Pod will have the label `collaset.kusionstack.io/replace-pair-new-id` to indicate the instance ID of the new Pod.
+
+Additionally, the new Pod and old Pod will each begin their own PodOpsLifecycles, which are independent of each other.
+
+
+### Selective Pod Deletion
+When scaling down a CollaSet, users may want to delete specified pods instead of deleting pods randomly.
+CollaSet supports specifying a set of Pod names within ```spec.scaleStrategy.podToDelete``` to recreate or scale in the specified pods.
+
+```yaml
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: CollaSet
+metadata:
+  name: sample
+spec:
+  replicas: 2
+  scaleStrategy:
+    podToDelete: # replace or scaleIn listed pods
+    - podName1
+    - podName2
+    # ...
+```
+When a user specifies a set of pods:
+1. On the one hand, if ```replicas``` is scaled down simultaneously, CollaSet will scale down the pods listed in ```podToDelete``` first.
+2. On the other hand, if ```replicas``` is not scaled down, the pods listed in ```podToDelete``` will be recreated, and the new pods will inherit the original pods' ```instance-id```.
+
+Note that, by default, the controller will clear a pod name from ```podToDelete``` once CollaSet cannot find the specified pod.
+Users can disable clearing pod names after pod deletion by setting the feature ```ReclaimPodToDelete``` to false (the default value is true).
+
+```shell
+# Disable the ReclaimPodToDelete feature when starting the controller with this argument
+$ /manager --feature-gates=ReclaimPodToDelete=false
+```
+
+### Recreate And Replace Pod by Label
+
+In practice, users often need to recreate or replace specified Pods under a CollaSet.
+
+To delete a Pod, users can simply call the Kubernetes API, like executing `kubectl delete pod <pod-name>`.
+However, this will bypass the [PodOpsLifecycle](https://www.kusionstack.io/docs/kuperator/concepts/podopslifecycle) mechanism.
+We provide the following two options:
+
+1. Enable the feature `GraceDeleteWebhook` so that it is possible to delete Pods through `PodOpsLifecycle`.
+```shell
+# Enable the GraceDeleteWebhook feature when starting the controller with this argument
+$ /manager --feature-gates=GraceDeleteWebhook=true
+```
+```shell
+$ kubectl -n default get pod
+NAME                    READY   STATUS    RESTARTS   AGE
+collaset-sample-vqccr   1/1     Running   0          21s
+
+# Delete the pod directly. The request is denied with a message indicating that the Pod deletion is being handled by PodOpsLifecycle
+$ kubectl -n default delete pod collaset-sample-vqccr
+Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle
+
+# The old Pod is deleted, and a new Pod will be created
+$ kubectl -n default get pod -w
+collaset-sample-vqccr   1/1     Running       0     71s
+collaset-sample-vqccr   1/1     Terminating   0     71s
+......
+collaset-sample-nbl6t   0/1     Pending       0     0s
+collaset-sample-nbl6t   0/1     ContainerCreating   0     0s
+......
+collaset-sample-nbl6t   1/1     Running       0     0s
+```
+2. Label the Pod with `podopslifecycle.kusionstack.io/to-delete`, so that CollaSet will delete the Pod through PodOpsLifecycle.
+
+```shell
+# Label Pod
+$ kubectl -n default label pod collaset-sample-nbl6t podopslifecycle.kusionstack.io/to-delete=true
+
+# The old Pod is deleted, and a new Pod will be recreated
+$ kubectl -n default get pod -w
+collaset-sample-nbl6t   1/1     Running       0     5m28s
+collaset-sample-nbl6t   1/1     Terminating   0     5m28s
+......
+collaset-sample-w6x69   0/1     Pending       0     0s
+......
+collaset-sample-w6x69   0/1     ContainerCreating   0     0s
+......
+collaset-sample-w6x69   1/1     Running       0     2s
+```
+
+Recreating a Pod will delete the old Pod first and then create a new one. This will affect the available Pod count.
+To avoid this, CollaSet provides a feature to replace Pods by labeling them with `podopslifecycle.kusionstack.io/to-replace`.
+
+```shell
+# Replace Pod by label
+$ kubectl -n default label pod collaset-sample-w6x69 podopslifecycle.kusionstack.io/to-replace=true
+
+# A new Pod is created first, and the old Pod is deleted once the new one is available
+$ kubectl -n default get pod -w
+collaset-sample-w6x69   1/1     Running       0     5m29s
+collaset-sample-74fsv   0/1     Pending       0     0s
+collaset-sample-74fsv   0/1     ContainerCreating   0     0s
+......
+collaset-sample-74fsv   1/1     Running       0     2s
+......
+collaset-sample-w6x69   0/1     Terminating   0     5m33s
+```
+
+
+### Supporting PVCs
+CollaSet introduces support for PVCs, allowing users to declare `VolumeClaimTemplates` to create PVCs for each Pod.
+Furthermore, in response to common issues with PVC management, such as high modification costs and limited control, CollaSet offers the following advantages over StatefulSet:
+
+1. Supports updating, adding, and deleting `volumeClaimTemplates`.
+2. Provides control over the PVC lifecycle.
+
+#### Provision PVCs
+The `collaset-pvc.yaml` file declares a CollaSet with `VolumeClaimTemplates` to provision a PVC with `1Gi` storage for each Pod.
+These PVCs are then mounted into the container at the path `/path/mount/www`.
+
+``` yaml
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: CollaSet
+metadata:
+  name: foo
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: foo
+  template:
+    metadata:
+      labels:
+        app: foo
+    spec:
+      containers:
+      - image: nginx:1.25
+        name: nginx
+        volumeMounts:
+        - mountPath: /path/mount/www # path to mount PVC
+          name: www
+  volumeClaimTemplates:
+  - metadata:
+      name: www
+    spec:
+      storageClassName: standard
+      volumeMode: Filesystem
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+          storage: 1Gi
+```
+
+Pods and PVCs can be provisioned by CollaSet.
+
+``` shell
+$ kubectl -n default apply -f collaset-pvc.yaml
+collaset.apps.kusionstack.io/foo created
+
+$ kubectl -n default get pod
+NAME        READY   STATUS    RESTARTS   AGE
+foo-pw5lg   1/1     Running   0          4s
+foo-5n6ts   1/1     Running   0          4s
+
+$ kubectl -n default get pvc
+NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+foo-www-h5zv7   Bound    pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6   1Gi        RWO            standard       7s
+foo-www-lswp2   Bound    pvc-9564b44b-9c99-467b-abee-4285183ff9c3   1Gi        RWO            standard       7s
+```
+
+Each Pod and its related PVC have the same value of the label `collaset.kusionstack.io/instance-id`.
+
+``` shell
+$ kubectl -n default get pod -o yaml | grep instance-id
+      collaset.kusionstack.io/instance-id: "1"
+      collaset.kusionstack.io/instance-id: "0"
+
+$ kubectl -n default get pvc -o yaml | grep instance-id
+      collaset.kusionstack.io/instance-id: "1"
+      collaset.kusionstack.io/instance-id: "0"
+```
+
+#### Update PVCs
+To save the operating costs of PVCs, e.g. expanding storage capacity, CollaSet supports updating, adding, and deleting `volumeClaimTemplates`.
+
+To achieve this, for each PVC, CollaSet calculates a hash value based on its template, and attaches it to the label `collaset.kusionstack.io/pvc-template-hash`.
+Once users modify the templates, CollaSet recognizes the change, calculates a new hash value, and attaches it to the new PVCs that replace the old ones.
+
+Let's give it a try: update the storage of the PVC template from `1Gi` to `2Gi`.
+``` shell
+$ kubectl -n default edit cls foo
+  ......
+  volumeClaimTemplates:
+  - metadata:
+      name: www
+    spec:
+      storageClassName: standard
+      volumeMode: Filesystem
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+-          storage: 1Gi
++          storage: 2Gi # update pvc template to expand storage
+......
+```
+
+There are 2 new PVCs with `2Gi` storage, created with a different hash value.
+
+``` shell
+$ kubectl -n default edit cls foo
+collaset.apps.kusionstack.io/foo edited
+
+$ kubectl -n default get pod
+NAME        READY   STATUS        RESTARTS   AGE
+foo-pw5lg   1/1     Terminating   0          7s
+foo-5n6ts   1/1     Terminating   0          7s
+foo-9nhz4   0/1     Pending       0          1s
+foo-xb2gd   0/1     Pending       0          1s
+
+$ kubectl -n default get pvc
+NAME            STATUS        VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+foo-www-h5zv7   Terminating   pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6   1Gi        RWO            standard       11s
+foo-www-lswp2   Terminating   pvc-9564b44b-9c99-467b-abee-4285183ff9c3   1Gi        RWO            standard       11s
+foo-www-cj2s9   Bound         pvc-647e2a81-7fc6-4f37-a835-e63da9172de3   2Gi        RWO            standard       5s
+foo-www-hp2t6   Bound         pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9   2Gi        RWO            standard       5s
+
+$ kubectl -n default get pvc -o yaml | grep pvc-template-hash
+      collaset.kusionstack.io/pvc-template-hash: 594d8857f9 # hash value of old pvc
+      collaset.kusionstack.io/pvc-template-hash: 594d8857f9
+      collaset.kusionstack.io/pvc-template-hash: d78c5ff6b # hash value of new pvc
+      collaset.kusionstack.io/pvc-template-hash: d78c5ff6b
+```
+
+For old PVCs, users can retain them by configuring the `whenScaled` policy to `Retain`.
+Then old PVCs can be re-mount on its related Pod after rolling back. +Otherwise, old PVCs can be deleted by default policy `Delete`. + + +#### Add PVCs +Add a PVC template `yyy`, which is mounted on the container at the path `/path/mount/yyy`. + +``` shell +$ kubectl -n default edit cls foo +...... + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www ++ - mountPath: /path/mount/yyy # path to mount PVC ++ name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi ++ - metadata: # added pvc template ++ name: yyy ++ spec: ++ storageClassName: standard ++ volumeMode: Filesystem ++ accessModes: [ "ReadWriteOnce" ] ++ resources: ++ requests: ++ storage: 2Gi +``` + +Now, each pod has two PVCs, which include a new PVCs claimed by template `yyy` and one old PVC claimed by template `www`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-8wwsz 0/1 Pending 0 1s +foo-9nhz4 1/1 Terminating 0 23s +foo-hd2cv 0/1 Pending 0 1s +foo-xb2gd 1/1 Terminating 0 23s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 25s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 25s +foo-yyy-c68nh Bound pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 3s # new pvc +foo-yyy-vpwss Bound pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 3s # new pvc +``` + +#### Delete PVCs +Delete the PVC template `yyy` on CollaSet. + +``` shell +$ kubectl -n default edit cls foo +...... + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www +- - mountPath: /path/mount/yyy # path to mount PVC +- name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi +- - metadata: # delete pvc template +- name: yyy +- spec: +- storageClassName: standard +- volumeMode: Filesystem +- accessModes: [ "ReadWriteOnce" ] +- resources: +- requests: +- storage: 2Gi +``` + +Now, PVCs claimed by template `yyy` are deleted and the origin PVCs claimed by template `www` are retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-6qcpc 1/1 Running 0 2s +foo-z2jqv 1/1 Running 0 2s +foo-8wwsz 1/1 Terminating 0 38s +foo-hd2cv 1/1 Terminating 0 38s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 61s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 61s +foo-yyy-c68nh Terminating pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 39s +foo-yyy-vpwss Terminating pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 39s +``` + +#### PVC Retention Policy +CollaSet provides control over PVC lifecycle by configuring `spec.persistentVolumeClaimRetentionPolicy`. +Users can retain or delete PVCs after its related Pod is scaled down or CollaSet is deleted, respectively. 
+This feature is also supported by [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) since v1.27. +Basic rule is detailed as follows: +- `whenScale` : decides to delete or retain PVCs after Pod is scaled down. +- `whenDeleted`: decides to delete or retain PVCs after CollaSet is deleted. + +For each policy users can set the value to either Delete (by default) or Retain. +Note that for StatefulSet, the default policy is Retain. + +#### whenScaled +Apply `collaset-pvc.yaml` and edit foo to scale replicas to 1. +``` shell +$ kubectl apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl edit cls foo + ...... + spec: +- replicas: 2 ++ replicas: 1 # scale in 1 pod + selector: + matchLabels: + app: foo + ...... +``` +As the `whenScaled` is not configured, thus its value is `Delete` by default. +Consequently, PVC `foo-www-wzwbq` is deleted as its related Pod `foo-tkc5m` is scaling down. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-tkc5m 0/1 Terminating 0 27s # related pvc is terminating +foo-vwtcm 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-wzwbq Terminating pvc-b92c28c6-59ad-4976-810c-8d538c4a22c6 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 29s +``` + +Set `Retain` to `whenScaled`, and scale replicas to 0. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 1 ++ replicas: 0 # scale in 1 pod + selector: + matchLabels: + app: foo ++ scaleStrategy: ++ persistentVolumeClaimRetentionPolicy: ++ whenScaled: Retain # retain the pvc after pod is scaled down + ...... +``` + +Pod `foo-vwtcm` is terminating, while its related PVC `foo-www-r4vlh` is retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-vwtcm -n default 1/1 Terminating 0 62s # related pvc is retained + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 63s +``` + +To validate the retention policy, try ro scale replicas to 2, and the remaining PVC should be mounted again. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 0 ++ replicas: 2 # scale out 2 pods + ...... +``` + +We can see that PVC `foo-www-r4vlh` is retained by Pod `foo-px487` as they have the same `instance-id`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-ld5xc 1/1 Running 0 27s +foo-px487 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 2m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` + +#### whenDelete +Edit `foo` to configure `Retain` policy for `whenDelete`, and then delete this CollaSet. 
+``` shell +$ kubectl -n default edit cls foo + ...... + scaleStrategy: + persistentVolumeClaimRetentionPolicy: + whenScaled: Retain ++ whenDelete: Retain # retain the pvc after collaset is deleted + ...... +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default delete cls foo +collaset.apps.kusionstack.io "foo" deleted +``` + +Now, try to recreate `foo` with 2 replicas, and the result shows both PVCs are retained. +``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-qhh8t 1/1 Running 0 2s +foo-ss255 1/1 Running 0 2s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 4m29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 6m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" # pvc foo-www-d48gx is retained + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.4/manuals/poddecoration.md b/kuperator_versioned_docs/version-v0.4/manuals/poddecoration.md new file mode 100644 index 00000000..9c3114f8 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/manuals/poddecoration.md @@ -0,0 +1,401 @@ +--- +sidebar_position: 4 +--- + +# PodDecoration +PodDecoration works in conjunction with CollaSet to selectively inject specific configurations to Pods that meet certain criteria. + +PodDecoration not only allows injecting sidecar containers to Pods but also enables modifying existing container configurations, metadata, and scheduling parameters etc. +The PodDecoration controller does not control the upgrade of Pods. The actual upgrade process is fully controlled by the CollaSet controller. This means that the injection upgrade of PodDecoration can also be performed `InPlaceIfPossible`. + +About [CollaSet](collaset.md). +# Example + +## 1. Create CollaSet + +```yaml +# collaset.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: foo +``` +Use `collaset.yaml` to create three pods under CollaSet `foo` management. +```shell +$ kubectl apply -f collaset.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl get cls +NAME DESIRED CURRENT AVAILABLE UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +foo 3 3 3 3 3 3 foo-7bdb974bc7 foo-7bdb974bc7 7s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2wnnf 1/1 Running 0 41s +foo-hqpx7 1/1 Running 0 41s +foo-mqt48 1/1 Running 0 41s +``` +## 2. Create PodDecoration + +The following `poddecoration.yaml` file describes a PodDecoration, which selects the pod under CollaSet `foo` and injects the content in `template` into the pod with `instance-id=0`. 
+ +```yaml +# poddecoration.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + selector: # selected pod range in which PodDecoration takes effect + matchLabels: + app: foo + updateStrategy: + rollingUpdate: + selector: # select pod to upgrade in effect range + matchLabels: + collaset.kusionstack.io/instance-id: "0" + template: + metadata: + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v1" + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + command: ["sleep", "2h"] + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` + +Create PodDecoration `sample-pd` to upgrade selected pod +```shell +$ kubectl apply -f poddecoration.yaml +poddecoration.apps.kusionstack.io/sample-pd created +``` +The status of PodDecoration is updated, and one pod is injected with sidecar through recreate. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 1 1 1 sample-pd-9465f4c84 20s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 15s +foo-2wnnf 1/1 Running 0 2m +foo-hqpx7 1/1 Running 0 2m + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-2wnnf + escaped: true + - name: foo-hqpx7 + escaped: true + matchedPods: 3 + injectedPods: 1 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + updatedRevision: sample-pd-9465f4c84 +``` + +## 3. Update PodDecoration + +### 3.1. Rolling update v1 + +Edit `sample-pd` to expand the upgrade scope. +```shell +$ kubectl edit pd sample-pd +``` + +```yaml +# poddecoration.yaml +# Edit updateStrategy to select instance-id in [0, 1, 2] +... +spec: + ... + updateStrategy: + rollingUpdate: + selector: + matchExpressions: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + - "1" # add + - "2" # add + template: + ... +``` + +All pods updated. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 3 3 sample-pd-9465f4c84 sample-pd-9465f4c84 3m + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 3m +foo-lftw8 2/2 Running 0 8s +foo-n57rr 2/2 Running 0 8s + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + currentRevision: sample-pd-9465f4c84 + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 3 + updatedReadyPods: 3 + updatedAvailablePods: 3 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-9465f4c84 +``` +### 3.2. Rolling update v1 -> v2 + + +Update `sample-pd`'s sidecar container image and `updateStrategy`. +```shell +$ kubectl edit pd sample-pd +``` +```yaml +# poddecoration.yaml +# Update sidecar-a's image with ubuntu:22.10 +# Edit updateStrategy to select instance-id in [0] +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + ... + updateStrategy: + rollingUpdate: + selector: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + template: + ... 
+ containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.10 + ... +``` +Pod `foo-2gnnl` in-place upgrade sidecar container image. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 1 1 sample-pd-9465f4c84 sample-pd-8697d4bf8c 6min + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 1 (12s ago) 6m +foo-lftw8 2/2 Running 0 3min +foo-n57rr 2/2 Running 0 3min + +$ kubectl get pod foo-2gnnl -o yaml | grep "image: ubuntu" + image: ubuntu:22.10 + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-8697d4bf8c + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-8697d4bf8c +``` + + +# Features + +## Injection + +### Metadata +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + template: + metadata: + - patchPolicy: MergePatchJson + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"sample-pd","version":"v2"}]' + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-name: sample-pd +``` +`patchPolicy` is the injected policy, as follows: +- `Retain`: The original value of annotations and labels will be retained. +- `Overwrite`: The value of annotations and labels corresponding to the existing key will be overwritten. +- `MergePatchJson`: It only takes effect for annotation. If the key does not exist, the value will be written directly. Otherwise, the json value will be merged. + +For example: +```yaml +# Old pod metadata +metadata: + labels: + custom.io/sidecar-version: "v1" + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}]' + +# After metadata injected +metadata: + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-type: sample-pd + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}, {"name":"sample-pd","version":"v2"}]' +``` +### Primary Container + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + template: + primaryContainers: + - targetPolicy: ByName + name: foo + image: foo:v2 + env: + - name: APP_NAME + value: foo + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` +Injection into the primary containers only supports limited fields: `image`, `env` and `volumeMounts`. + +`targetPolicy` indicates which existed container these configuration should inject into, as follows: +- `ByName`: Only inject containers matching `name`. +- `All`: Inject all primary containers. +- `First`: Inject into first primary container. +- `Last`: Inject into last primary container. + +### Sidecar Container + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + template: + containers: + - injectPolicy: AfterPrimaryContainer # Container injected policy, AfterPrimaryContainer or BeforePrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + ... +``` +Inject a new sidecar container. 
Optional, it can be placed in front or behind the primary container. +### InitContainer + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + template: + initContainers: + - name: init + image: custom-init-image:v1 + ... +``` + +## Upgrade strategy + +### selector +You can use `selector` to select the pod. The `CollaSet` provides a unique `instance-id` for each pod. Of course, custom labels can also be used to label pods for triggering upgrades. +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + updateStrategy: + rollingUpdate: + selector: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" +``` + +### partition +Partition is the desired number or percent of Pods in **old revisions**, defaults to `0`. +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + updateStrategy: + rollingUpdate: + partition: 2 # int number +``` diff --git a/kuperator_versioned_docs/version-v0.4/manuals/podtransitionrule.md b/kuperator_versioned_docs/version-v0.4/manuals/podtransitionrule.md new file mode 100644 index 00000000..de0f7d2f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/manuals/podtransitionrule.md @@ -0,0 +1,220 @@ +--- +sidebar_position: 3 +--- + +# PodTransitionRule +In normal pod lifecycle, some phases are defined. For example, K8s Pods follow a defined lifecycle,starting in the `Pending` phase, moving through `Running` if at least one of its primary containers starts `OK`, and then through either the `Succeeded` or `Failed` phases depending on whether any container in the Pod terminated in failure. + +These phase definitions can fulfill basic Pod change scenarios, but it are ambiguous. +Actually, before pod upgrade or ready, it is necessary to have some check mechanisms in place to ensure the safety of pod changes. Fortunately, [PodOpsLifecycle](../concepts/podopslifecycle.md) extends and supports some check stages: `PreCheck` before pod upgrade and `PostCheck` before pod ready. + +To ensure a more fine-grained and controlled change process for Pods, we introduce custom rules or perform additional tasks as prerequisites for state transitions before the desired state of a Pod is achieved. Similar to the Pod `readinessGates`, where certain conditions must be met for a Pod to be considered readiness. For example, we consider a Pod ready for the `PostCheck` phase only if it has specific labels. For this purpose, we introduce the `PodTransitionRule` as a prerequisite for the state transition of a Pod. + +## Rule Definition + +You can use `PodTransitionRule` to define a set of transition rules for your workload pods. +Each rule will be executed at the corresponding stage, and it will be blocked if the conditions are not met. + +Here is an example: +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + name: podtransitionrule-sample +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + - stage: PreCheck # stages are supported by PodOpsLifecycle. Defaults to PreCheck. + labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + name: labelCheck + - stage: PostCheck + webhook: + clientConfig: + url: https://1.1.1.1:8089/post-stop + caBundle: Cg== + poll: + url: http://1.1.1.1:8089/fetch-result + rawQueryKey: task-id # URL parameter key to carry trace ID when fetching result. 
Defaults to task-id, in the form 'QueryUrl=URL?rawQueryKey=<taskId>'
+          intervalSeconds: 5
+          timeoutSeconds: 60
+        failurePolicy: Fail
+        parameters:
+          - key: podIP
+            valueFrom:
+              fieldRef:
+                fieldPath: status.podIP
+      name: webhookCheck
+  selector: # select pods in effect
+    matchLabels:
+      app: foo
+```
+
+
+### Available Policy
+An `availablePolicy` rule defines the availability strategy during the Pod update process.
+
+#### maxUnavailable
+```yaml
+availablePolicy:
+  maxUnavailableValue: 50% # int or string
+```
+
+`maxUnavailableValue` is the maximum number of pods that can be unavailable during the update.
+The value can be an absolute number (e.g. 5) or a percentage of desired pods (e.g. 10%).
+The absolute number is calculated from the percentage by rounding down.
+This cannot be 0.
+
+#### minAvailable
+```yaml
+availablePolicy:
+  minAvailableValue: 5 # int or string
+```
+`minAvailableValue` is the minimum number of pods that should be available during the update.
+
+### Label Check
+
+A `labelCheck` rule is used to check whether the required labels are satisfied.
+You can define your own labels as change-check conditions and modify them according to your needs.
+```yaml
+labelCheck:
+  requires:
+    matchLabels:
+      app.custom/ready: 'true'
+    matchExpressions:
+      - key: app.custom/forbidden
+        operator: DoesNotExist
+```
+
+### Webhook
+A `webhook` is an HTTP callback, based on which an external web application can determine whether a pod can pass this check.
+
+* An HTTP POST request is sent first when pods enter the configured stage, which defaults to PreCheck.
+* If `poll` is provided, this rule then keeps calling the polling URL to fetch the result of a long-running job. The job can be located by the `task-id` returned in the response to the first request.
+
+
+```yaml
+webhook:
+  clientConfig: # custom server config
+    url: https://1.1.1.1:8089/post-stop
+    caBundle: Cg==
+    poll:
+      url: http://1.1.1.1:8089/fetch-result
+      rawQueryKey: task-id
+      intervalSeconds: 5
+      timeoutSeconds: 60
+  failurePolicy: Fail
+  parameters:
+    - key: podIP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.podIP
+```
+**Protocol without poll**
+
+Request:
+```json
+// URL: https://1.1.1.1:8089/post-stop
+// Method: POST
+
+{
+  "traceId": "", // generated by Kuperator; can be used to track the request
+  "stage": "PreTrafficOff",
+  "ruleName": "webhookCheck",
+  "resources": [ // Information of the Pods which are in this stage
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-a",
+      "parameters": {
+        "podIP": "1.0.0.1" // customized information users can indicate via the rule parameters
+      }
+    },
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-b",
+      "parameters": {
+        "podIP": "1.0.0.2"
+      }
+    }
+  ]
+}
+```
+Response:
+```json
+{
+  "success": false,
+  "message": "msg",
+  "finishedNames": ["pod-a", "pod-b"]
+}
+```
+The response field `success` indicates whether all pods are approved. If it is `false`, the `finishedNames` field can be used to approve a subset of the pods.
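+
+For reference, below is a minimal sketch of what a webhook backend for the protocol above (without poll) could look like. It is not part of Kuperator; the endpoint path, the port, and the approve-everything logic are illustrative assumptions only, and TLS setup is omitted.
+
+```go
+// Minimal sketch of a webhook backend for the "protocol without poll".
+// Assumption: every pod in the request is approved immediately.
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+)
+
+type resource struct {
+	APIVersion string            `json:"apiVersion"`
+	Kind       string            `json:"kind"`
+	Name       string            `json:"name"`
+	Parameters map[string]string `json:"parameters"`
+}
+
+type checkRequest struct {
+	TraceID   string     `json:"traceId"`
+	Stage     string     `json:"stage"`
+	RuleName  string     `json:"ruleName"`
+	Resources []resource `json:"resources"`
+}
+
+type checkResponse struct {
+	Success       bool     `json:"success"`
+	Message       string   `json:"message"`
+	FinishedNames []string `json:"finishedNames"`
+}
+
+func main() {
+	http.HandleFunc("/post-stop", func(w http.ResponseWriter, r *http.Request) {
+		var req checkRequest
+		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		// Placeholder check: approve every pod in this stage.
+		approved := make([]string, 0, len(req.Resources))
+		for _, res := range req.Resources {
+			approved = append(approved, res.Name)
+		}
+		w.Header().Set("Content-Type", "application/json")
+		_ = json.NewEncoder(w).Encode(checkResponse{
+			Success:       true,
+			Message:       "all pods approved",
+			FinishedNames: approved,
+		})
+	})
+	log.Fatal(http.ListenAndServe(":8089", nil))
+}
+```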
+
+**Protocol with poll**
+
+Request:
+```json
+// URL: https://1.1.1.1:8089/post-stop
+// Method: POST
+
+{
+  "traceId": "", // generated by Kuperator; can be used to track the request
+  "stage": "PreTrafficOff",
+  "ruleName": "webhookCheck",
+  "resources": [ // Information of the Pods which are in this stage
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-a",
+      "parameters": {
+        "podIP": "1.0.0.1" // customized information users can indicate via the rule parameters
+      }
+    },
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-b",
+      "parameters": {
+        "podIP": "1.0.0.2"
+      }
+    }
+  ]
+}
+```
+
+Response:
+
+```json
+{
+  "success": true,
+  "poll": true, // required; indicates that polling calls are necessary
+  "taskId": "<taskId>", // required to fetch the polling result
+  "message": "msg"
+}
+```
+The response field `success` indicates whether the first request succeeded. If it is `true` and the field `poll` in the response is `true` (or the field `async` in the response is `true`), PodTransitionRule will then keep calling the poll URL to fetch the processing result.
+The field `taskId` is required for polling.
+
+The polling request uses the GET method, in the form `QueryUrl=URL?task-id=<taskId>`. The parameter key in this URL defaults to `task-id` if `poll` is used in the above response; it would be `trace-id` if `async` is used instead.
+Users can also specify the key via the field `poll.rawQueryKey`.
+
+The response to the polling call is expected to look like the following:
+
+```json
+{
+  "success": true,
+  "message": "msg",
+  "finished": false,
+  "finishedNames": ["pod-a", "pod-b"]
+}
+```
+
+`success` is supposed to be `true` if there is no error. If all pods are approved, `finished` should be `true`.
+If `finished` is `false`, `finishedNames` can be used to approve a subset of the pods.
diff --git a/kuperator_versioned_docs/version-v0.4/manuals/resourceconsist.md b/kuperator_versioned_docs/version-v0.4/manuals/resourceconsist.md
new file mode 100644
index 00000000..19aa12f6
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.4/manuals/resourceconsist.md
@@ -0,0 +1,437 @@
+---
+sidebar_position: 2
+---
+
+# ResourceConsist
+[**ResourceConsist**](https://github.com/KusionStack/resourceconsist/blob/main/README.md) aims to make it easy to implement a customized controller, and offers controllers the ability to follow
+**PodOpsLifecycle**.
+
+## Tutorials
+**kusionstack.io/resourceconsist** mainly consists of frame, experimental/adapters, and adapters.
+
+The frame, ```kusionstack.io/resourceconsist/pkg/frame```, is used by adapters to start a controller, which handles
+Reconcile and the Employer/Employees' spec & status. If you write an adapter in your own repo, you can import
+```kusionstack.io/resourceconsist/pkg/frame/controller``` and ```kusionstack.io/resourceconsist/pkg/frame/webhook```,
+and call AddToMgr to start a controller.
+
+>webhookAdapter only needs to be implemented for controllers following PodOpsLifecycle.
+
+```go
+package main
+
+import (
+	controllerframe "kusionstack.io/resourceconsist/pkg/frame/controller"
+	webhookframe "kusionstack.io/resourceconsist/pkg/frame/webhook"
+)
+
+func main() {
+	controllerframe.AddToMgr(manager, yourOwnControllerAdapter)
+	webhookframe.AddToMgr(manager, yourOwnWebhookAdapter)
+}
+```
+### adapters
+The adapters package, ```kusionstack.io/resourceconsist/pkg/adapters```, consists of built-in adapters. You can start a
+controller with built-in adapters by just calling AddBuiltinControllerAdaptersToMgr and AddBuiltinWebhookAdaptersToMgr,
+passing built-in adapters' names.
Currently, an aliababacloudslb adapter has released. You can use it as follows: +```go +import ( + "kusionstack.io/resourceconsist/pkg/adapters" +) + +func main() { + adapters.AddBuiltinControllerAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb}) + adapters.AddBuiltinWebhookAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb}) +} +``` +Built-in adapters can also be used like how frame used. You can call NewAdapter from a certain built-in adapter pkg +and the call frame.AddToMgr to start a controller/webhook + +More built-in adapters will be implemented in the future. To make this repo stable, all new built-in adapters will +be added to ```kusionstack.io/resourceconsist/pkg/experimental/adapters``` first, and then moved to +```kusionstack.io/resourceconsist/pkg/adapters``` until ready to be released. +#### alibabacloudslb adapter +```pkg/adapters/alibabacloudslb``` is an adapter that implements ReconcileAdapter. It follows **PodOpsLifecycle** to +handle various scenarios during pod operations, such as creating a new pod, deleting an existing pod, or handling +changes to pod configurations. This adapter ensures minimal traffic loss and provides a seamless experience for users +accessing services load balanced by Alibaba Cloud SLB. + +In ```pkg/adapters/alibabacloudslb```, the real server is removed from SLB before pod operation in ACK. The LB +management and real server management are handled by CCM in ACK. Since alibabacloudslb adapter follows PodOpsLifecycle +and real servers are managed by CCM, ReconcileLifecycleOptions should be implemented. If the cluster is not in ACK or +CCM is not working in the cluster, the alibabacloudslb controller should implement additional methods of ReconcileAdapter. +### experimental/adapters +The experimental/adapters is more like a pre-release pkg for built-in adapters. Usage of experimental/adapters is same +with built-in adapters, and be aware that **DO NOT USE EXPERIMENTAL/ADAPTERS IN PRODUCTION** +### demo adapter +A demo is implemented in ```resource_controller_suite_test.go```. In the demo controller, the employer is represented +as a service and is expected to have the following **DemoServiceStatus**: +``` +DemoServiceStatus{ + EmployerId: employer.GetName(), + EmployerStatuses: DemoServiceDetails{ + RemoteVIP: "demo-remote-VIP", + RemoteVIPQPS: 100, + } +} +``` +The employee is represented as a pod and is expected to have the following **DemoPodStatus**: +``` +DemoPodStatus{ + EmployeeId: pod.Name, + EmployeeName: pod.Name, + EmployeeStatuses: PodEmployeeStatuses{ + Ip: string, + Ipv6: string, + LifecycleReady: bool, + ExtraStatus: PodExtraStatus{ + TrafficOn: bool, + TrafficWeight: int, + }, + } +} +``` +The DemoResourceProviderClient is a fake client that handles backend provider resources related to the employer/employee +(service/pods). In the Demo Controller, ```demoResourceVipStatusInProvider``` and ```demoResourceRsStatusInProvider``` +are mocked as resources in the backend provider. + +How the demo controller adapter realized will be introduced in detail as follows, +```DemoControllerAdapter``` was defined, including a kubernetes client and a resourceProviderClient. What included in +the Adapter struct can be defined as needed. +```go +type DemoControllerAdapter struct { + client.Client + resourceProviderClient *DemoResourceProviderClient +} +``` +Declaring that the DemoControllerAdapter implemented ```ReconcileAdapter``` and ```ReconcileLifecycleOptions```. 
+Implementing ```RconcileAdapter``` is a must action, while ```ReconcileLifecycleOptions``` isn't, check the remarks +for ```ReconcileLifecycleOptions``` in ```kusionstack.io/resourceconsist/pkg/frame/controller/types.go``` to find why. +```go +var _ ReconcileAdapter = &DemoControllerAdapter{} +var _ ReconcileLifecycleOptions = &DemoControllerAdapter{} +``` +Following two methods for DemoControllerAdapter inplementing ```ReconcileLifecycleOptions```, defines whether +DemoControllerAdapter following PodOpsLifecycle and need record employees. +```go +func (r *DemoControllerAdapter) FollowPodOpsLifeCycle() bool { + return true +} + +func (r *DemoControllerAdapter) NeedRecordEmployees() bool { + return needRecordEmployees +} +``` +```IEmployer``` and ```IEmployee``` are interfaces that includes several methods indicating the status employer and +employee. +```go +type IEmployer interface { + GetEmployerId() string + GetEmployerStatuses() interface{} + EmployerEqual(employer IEmployer) (bool, error) +} + +type IEmployee interface { + GetEmployeeId() string + GetEmployeeName() string + GetEmployeeStatuses() interface{} + EmployeeEqual(employee IEmployee) (bool, error) +} + +type DemoServiceStatus struct { + EmployerId string + EmployerStatuses DemoServiceDetails +} + +type DemoServiceDetails struct { + RemoteVIP string + RemoteVIPQPS int +} + +type DemoPodStatus struct { + EmployeeId string + EmployeeName string + EmployeeStatuses PodEmployeeStatuses +} +``` +```GetSelectedEmployeeNames``` returns all employees' names selected by employer, here is pods' names selected by +service. ```GetSelectedEmployeeNames``` is used for ensuring LifecycleFinalizer and ExpectedFinalizer, so you can give +it an empty return if your adapter doesn't follow PodOpsLifecycle. +```go +func (r *DemoControllerAdapter) GetSelectedEmployeeNames(ctx context.Context, employer client.Object) ([]string, error) { + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + selected := make([]string, len(podList.Items)) + for idx, pod := range podList.Items { + selected[idx] = pod.Name + } + + return selected, nil +} +``` +```GetExpectedEmployer``` and ```GetCurrentEmployer``` defines what is expected under the spec of employer and what is +current status, like the load balancer from a cloud provider. Here in the demo adapter, expected is defined by hardcode +and current is retrieved from a fake resource provider ```demoResourceVipStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) GetExpectedEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return nil, nil + } + var expect []IEmployer + expect = append(expect, DemoServiceStatus{ + EmployerId: employer.GetName(), + EmployerStatuses: DemoServiceDetails{ + RemoteVIP: "demo-remote-VIP", + RemoteVIPQPS: 100, + }, + }) + return expect, nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + var current []IEmployer + + req := &DemoResourceVipOps{} + resp, err := r.resourceProviderClient.QueryVip(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource vip query resp is nil") + } + + for _, employerStatus := range resp.VipStatuses { + current = append(current, employerStatus) + } + return current, nil +} +``` +```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles creation/update/deletion of resources related to employer on +related backend provider. Here in the demo adapter, ```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles +```demoResourceVipStatusInProvider```. +```go +func (r *DemoControllerAdapter) CreateEmployer(ctx context.Context, employer client.Object, toCreates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + + toCreateDemoServiceStatus := make([]DemoServiceStatus, len(toCreates)) + for idx, create := range toCreates { + createDemoServiceStatus, ok := create.(DemoServiceStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreates employer is not DemoServiceStatus") + } + toCreateDemoServiceStatus[idx] = createDemoServiceStatus + } + + _, err := r.resourceProviderClient.CreateVip(&DemoResourceVipOps{ + VipStatuses: toCreateDemoServiceStatus, + }) + if err != nil { + return nil, toCreates, err + } + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployer(ctx context.Context, employer client.Object, toUpdates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoServiceStatus := make([]DemoServiceStatus, len(toUpdates)) + for idx, update := range toUpdates { + updateDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdates employer is not DemoServiceStatus") + } + toUpdateDemoServiceStatus[idx] = updateDemoServiceStatus + } + + _, err := r.resourceProviderClient.UpdateVip(&DemoResourceVipOps{ + VipStatuses: toUpdateDemoServiceStatus, + }) + if err != nil { + return nil, toUpdates, err + } + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployer(ctx context.Context, employer client.Object, toDeletes []IEmployer) ([]IEmployer, []IEmployer, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoServiceStatus := make([]DemoServiceStatus, len(toDeletes)) + for idx, update := range toDeletes { + deleteDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDeletes employer is not DemoServiceStatus") + } + toDeleteDemoServiceStatus[idx] = deleteDemoServiceStatus + } + + _, err := r.resourceProviderClient.DeleteVip(&DemoResourceVipOps{ + VipStatuses: toDeleteDemoServiceStatus, + }) + if err != nil { + return nil, toDeletes, err + } + return toDeletes, nil, nil +} +``` 
+```GetExpectedEmployee```and```GetCurrentEmployee``` defines what is expected under the spec of employer and employees +and what is current status, like real servers under the load balancer from a cloud provider. Here in the demo adapter, +expected is calculated from pods and current is retrieved from a fake resource provider ```demoResourceRsStatusInProvider```. +```go +// GetExpectEmployeeStatus return expect employee status +func (r *DemoControllerAdapter) GetExpectedEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return []IEmployee{}, nil + } + + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + expected := make([]IEmployee, len(podList.Items)) + expectIdx := 0 + for _, pod := range podList.Items { + if !pod.DeletionTimestamp.IsZero() { + continue + } + status := DemoPodStatus{ + EmployeeId: pod.Name, + EmployeeName: pod.Name, + } + employeeStatuses, err := GetCommonPodEmployeeStatus(&pod) + if err != nil { + return nil, err + } + extraStatus := PodExtraStatus{} + if employeeStatuses.LifecycleReady { + extraStatus.TrafficOn = true + extraStatus.TrafficWeight = 100 + } else { + extraStatus.TrafficOn = false + extraStatus.TrafficWeight = 0 + } + employeeStatuses.ExtraStatus = extraStatus + status.EmployeeStatuses = employeeStatuses + expected[expectIdx] = status + expectIdx++ + } + + return expected[:expectIdx], nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + var current []IEmployee + req := &DemoResourceRsOps{} + resp, err := r.resourceProviderClient.QueryRealServer(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource rs query resp is nil") + } + + for _, rsStatus := range resp.RsStatuses { + current = append(current, rsStatus) + } + return current, nil +} +``` +```CreateEmployees/UpdateEmployees/DeleteEmployees``` handles creation/update/deletion of resources related to employee +on related backend provider. Here in the demo adapter, ```CreateEmployees/UpdateEmployees/DeleteEmployees``` +handles ```demoResourceRsStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) CreateEmployees(ctx context.Context, employer client.Object, toCreates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + toCreateDemoPodStatuses := make([]DemoPodStatus, len(toCreates)) + + for idx, toCreate := range toCreates { + podStatus, ok := toCreate.(DemoPodStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreate is not DemoPodStatus") + } + toCreateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.CreateRealServer(&DemoResourceRsOps{ + RsStatuses: toCreateDemoPodStatuses, + }) + if err != nil { + return nil, toCreates, err + } + + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployees(ctx context.Context, employer client.Object, toUpdates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoPodStatuses := make([]DemoPodStatus, len(toUpdates)) + + for idx, toUpdate := range toUpdates { + podStatus, ok := toUpdate.(DemoPodStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdate is not DemoPodStatus") + } + toUpdateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.UpdateRealServer(&DemoResourceRsOps{ + RsStatuses: toUpdateDemoPodStatuses, + }) + if err != nil { + return nil, toUpdates, err + } + + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployees(ctx context.Context, employer client.Object, toDeletes []IEmployee) ([]IEmployee, []IEmployee, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoPodStatuses := make([]DemoPodStatus, len(toDeletes)) + + for idx, toDelete := range toDeletes { + podStatus, ok := toDelete.(DemoPodStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDelete is not DemoPodStatus") + } + toDeleteDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.DeleteRealServer(&DemoResourceRsOps{ + RsStatuses: toDeleteDemoPodStatuses, + }) + if err != nil { + return nil, toDeletes, err + } + + return toDeletes, nil, nil +} +``` diff --git a/kuperator_versioned_docs/version-v0.4/started/_category_.json b/kuperator_versioned_docs/version-v0.4/started/_category_.json new file mode 100644 index 00000000..877a378f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 2 +} diff --git a/kuperator_versioned_docs/version-v0.4/started/demo-graceful-operation.md b/kuperator_versioned_docs/version-v0.4/started/demo-graceful-operation.md new file mode 100644 index 00000000..6eb1fce9 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/started/demo-graceful-operation.md @@ -0,0 +1,340 @@ +# Using KusionStack Kuperator to operate Pods gracefully + +Applications always provide its service along with traffic routing. +On Kubernetes, they should be a set of Pods and a corresponding Kubernetes Service resource to expose the service. + +However, during operations such as updating Pod revisions, +there is a risk that client request traffic may be lost. This can lead to a poor user experience for developers. + +This tutorial will demonstrate how to operate Pods gracefully in a KusionStack Kuperator way on Aliyun ACK +with SLB as a Service backend provider. 
+ +> You can also get the same point from [this video](https://www.bilibili.com/video/BV1n8411q7sP/?t=15.7), +> which shows the same case using both KusionStack Kusion and Kuperator. +> The sample used in this video can be found from [KusionStack Catalog](https://github.com/KusionStack/catalog/tree/main/models/samples/wordpress). + +## Preparing + +First, ensure that you have an Aliyun ACK Kubernetes cluster set up in order to provision an Aliyun SLB. + +Next, install KusionStack Kuperator on this Kubernetes cluster +following [installation doc](https://kusionstack.io/docs/kuperator/started/install). + +## Get started + +### Create a new namespace + +To begin, create a new namespace for this tutorial: + +```shell +$ kubectl create ns kuperator-tutorial +``` + +### Provision Pods and Services + +You can create a set of Pods to run up a demo application service +by creating CollaSet resource using following command: + +``` shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.3 + name: server + command: + - /server + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +There should be 3 Pods created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +server-c5lsr 1/1 Running 0 2m23s +server-p6wrx 1/1 Running 0 2m23s +server-zn62c 1/1 Running 0 2m23s +``` + +Then create a Kubernetes Service by running following command, +which will provision Aliyun SLB to expose service. + +```shell +echo ' +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small + service.beta.kubernetes.io/backend-type: eni + labels: + kusionstack.io/control: "true" # this label is required + name: server +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: server + type: LoadBalancer +' | kubectl -n kuperator-tutorial apply -f - +``` + +A service with external IP should be provisioned. + +```shell +$ kubectl -n kuperator-tutorial get svc server +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +server LoadBalancer 192.168.225.55 47.101.49.182 80:30146/TCP 51s +``` + +The label `kusionstack.io/control: "true"` on Service is very important. +It means this service resource will be recognized by ResourceConsist framework, and then participate in PodOpsLifecycle +to control the Aliyun SLB to switch off traffic before updating each Pod and switch on traffic after it finished, +in order to protect the service. + +### Provision a client + +Then we will provision a client to access the service we created before. +Please replace `` in the following CollaSet yaml with the external IP from Kubernetes Service created above, and apply again. 
+ +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: client +spec: + replicas: 1 + selector: + matchLabels: + app: client + template: + metadata: + labels: + app: client + spec: + containers: + - image: wu8685/echo:1.3 + name: nginx + command: + - /client + args: + - -url + - http:///echo # EXTERNAL_IP should be replaced + - -m + - POST + - d + - kuperator-tutorial + - -qps + - "10" + - -worker + - "10" + - -timeout + - "10000" + resources: + limits: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi +' | kubectl -n kuperator-tutorial apply -f - +``` + +A client Pod should be created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-nc426 1/1 Running 0 30s +server-c5lsr 1/1 Running 0 19m +server-p6wrx 1/1 Running 0 19m +server-zn62c 1/1 Running 0 19m +``` + +This client will continuously access the service using the configuration provided in the command. +You can monitor the response codes from its logs: + +```shell +kubectl -n kuperator-tutorial logs -f client-nc426 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +``` + +The accesses are all successful. + +### Update Pod revision + +To trigger a Pod revision update, run the following command +to edit the container image and command in the PodTemplate of CollaSet: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.2 + name: server + command: + - /app/echo + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +It will trigger all Pods updated simultaneously. So the application `server` has no Pod to serve. +We can observe the error from client logs. 
+
+```shell
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:54040->47.101.49.182:80: read: connection reset by peer
+worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:34438->47.101.49.182:80: read: connection reset by peer
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-1 another loop, request: 20, failed: 3
+worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-0 another loop, request: 20, failed: 3
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+```
+
+### Provision PodTransitionRule
+
+To avoid this problem, provision a PodTransitionRule with a `maxUnavailable` rule of 50% by running the following command:
+
+```shell
+echo '
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: PodTransitionRule
+metadata:
+  name: server
+spec:
+  rules:
+  - availablePolicy:
+      maxUnavailableValue: 50%
+    name: maxUnavailable
+  selector:
+    matchLabels:
+      app: server
+' | kubectl -n kuperator-tutorial apply -f -
+```
+
+After updating the server CollaSet again to trigger a new rollout, you will see the Pods updated one by one,
+ensuring that at least one Pod is always available to serve.
+
+```shell
+kubectl -n kuperator-tutorial get pod
+NAME           READY   STATUS    RESTARTS   AGE
+client-rrfbj   1/1     Running   0          25s
+server-457sn   0/1     Running   0          5s
+server-bd5sz   0/1     Running   0          5s
+server-l842s   1/1     Running   0          2m4s
+```
+
+You can see from the client logs that no access requests fail during this update.
+
+```shell
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+```
+
+### Clean tutorial namespace
+
+At the end of this tutorial, you can clean up the resources by deleting the namespace:
+
+```shell
+$ kubectl delete ns kuperator-tutorial
+```
+
+## Comparison with the Native Approach
+
+Kubernetes provides `preStop` and `postStart` hooks in each container, through which users can also interact with services outside
+Kubernetes, such as the Aliyun SLB service. However, KusionStack Kuperator offers several advantages:
+
+* Pod level vs. container level
+
+Kuperator offers Pod-level hooks, which carry more complete information than a single container's hooks,
+especially when there are several containers in one Pod.
+
+* Pluggable
+
+Through KusionStack Kuperator, you can decouple operations executed before or after Pods actually change.
+For example, traffic control can be added or removed without modifying the Pod's preStop configuration. + +* Rollback option + +In case of issues, rollback becomes a viable option when using the Kuperator approach to update Pods. +Since Kuperator does not modify the Pods or their containers during the update, +if the traffic service experiences problems, there is an opportunity to cancel the update. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.4/started/install.md b/kuperator_versioned_docs/version-v0.4/started/install.md new file mode 100644 index 00000000..1ac6a89c --- /dev/null +++ b/kuperator_versioned_docs/version-v0.4/started/install.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 2 +--- + +# Installation + +## Install with helm +KusionStack Kuperator requires **Kubernetes version >= 1.18** +```shell +# Firstly add charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install kuperator kusionstack/kuperator +``` + + +[Helm](https://github.com/helm/helm) is a tool for managing packages of pre-configured Kubernetes resources. +### Optional: chart parameters + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|-------------|----------------|----------------| +| `namespace` | namespace for Kuperator installation | `kusionstack-system` | +| `namespaceEnabled` | Whether to create the installation.namespace | `true` | +| `managerReplicas`| Replicas of Kuperator deployment | `3` | +| `image.repo` | Repository for kuperator image | `kusionstack/kuperator`| +| `image.pullPolicy`| Image pull policy for kuperator-manager container | `IfNotPresent` | +| `image.tag` | Tag for kuperator-manager image | `v0.1.0` | +| `resources.limits.cpu` | CPU resource limit of kuperator-manager container | `500m` | +| `resources.limits.memory` | Memory resource limit of kuperator-manager container | `128Mi` | +| `resources.requests.cpu` | CPU resource request of kuperator-manager container | `10m` | +| `resources.requests.memory` | Memory resource request of kuperator-manager container | `64Mi` | + +### Upgrade + +Run following command to upgrade KusionStack Kuperator to the latest version. + +```shell +# Upgrade to the latest version +$ helm upgrade kuperator kusionstack/kuperator +``` + +### Uninstall + +Run following command to uninstall KusionStack Kuperator. + +```shell +# Uninstall +$ helm uninstall kuperator +``` \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.5/concepts/_category_.json b/kuperator_versioned_docs/version-v0.5/concepts/_category_.json new file mode 100644 index 00000000..1d3167d4 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 3 +} diff --git a/kuperator_versioned_docs/version-v0.5/concepts/podopslifecycle.md b/kuperator_versioned_docs/version-v0.5/concepts/podopslifecycle.md new file mode 100644 index 00000000..88068133 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/concepts/podopslifecycle.md @@ -0,0 +1,232 @@ +--- +sidebar_position: 2 +--- + +# PodOpsLifecycle + +## Background + +Kubernetes provides a set of default controllers for workload management, such as StatefulSet, Deployment, and DaemonSet, which are responsible for Pod operations. 
+Meanwhile, application users may also have some services outside the Kubernetes cluster that are closely related to the Pod Lifecycle, including traffic routing, service discovery, or alert monitoring. +However, they face challenges in participating in the operational lifecycle of a Pod, even if they are connected to Kubernetes by developing a controller that watches the Pods. + +PodOpsLifecycle aims to offer Kubernetes administrators and developers finer-grained control over the entire lifecycle of a Pod. +It enables developers to execute necessary actions before, during, and after specific phases of a Pod operation. +For instance, removing the Pod's IP from the traffic route before initiating the Pod operation, performing the actual Pod operations, and adding it back after the Pod operation is completed to achieve a smooth and graceful Pod operation, and prevent any traffic loss. + +## Introduction + +In PodOpsLifecycle, participants are classified into two roles: `operation controllers` and `cooperation controllers`. +- **Operation controllers** are responsible for operating Pods, such as Deployments and StatefulSets from Kubernetes, and CollaSets from Kuperator which intend to scale, update, or recreate Pods. +- **Cooperation controllers** are sensitive with Pod status. They handle resources or configurations around Pods, which may include traffic controller, alert monitoring controller, etc. These controllers typically reconcile Kubernetes resources around Pods with external services, such as sync Pod IPs with the LB provider, or maintaining Pods' metadata with application monitoring system. + +The two types of controllers do not need to be aware of each other. All controllers are organized by PodOpsLifecycle. Additionally, KusionStack Kuperator introduces extra phases around the native Kubernetes Pod Lifecycle: ServiceAvailable, Preparing, and Completing. + +![pod-ops-lifecycle](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png) + +- **Completing**: After a Pod is created or updated and becomes ready, Kuperator marks its PodOpsLifecycle as the `Completing` phase. During this phase, the Pod is in a ready condition, prompting cooperation controllers to perform actions such as registering the Pod IP in the traffic route. Once all cooperation controllers complete their tasks, Kuperator sets the PodOpsLifecycle to the `ServiceAvailable` phase. +- **ServiceAvailable**: This phase indicates that the Pod is in a normal state and ready to serve. If everything goes smoothly, the Pod remains in the `ServiceAvailable` phase until the next operation. +- **Preparing**: When an operation controller needs to operate the Pod, it triggers a new PodOpsLifecycle. The Pod then transitions from the `ServiceAvailable` phase to the `Preparing` phase. During this phase, the Pod is initially marked as Unready by setting ReadinessGate to false. All cooperation controllers then begin preparing tasks, such as removing the Pod's IP from the traffic route. After completing these tasks, the Pod enters the `Operating` phase. +- **Operating**: If a Pod enters the `Operating` phase, it is expected to accept any kind of operation without any damage, including recreation, scaling-in, upgrading, etc. Operation controllers are permitted to apply any changes to this Pod. Once all these operations are completed, the Pod advances to the next phase — `Completing`, and the PodOpsLifecycle continues. 
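+
+As a rough sketch of the mechanism behind the `Preparing` phase: PodOpsLifecycle relies on a Kubernetes readiness gate declared on the Pod, and Kuperator flips the corresponding Pod condition to `False` before an operation and back to `True` once the lifecycle completes. The condition type below is only a placeholder for illustration — check a Pod managed by Kuperator in your cluster for the exact name:
+
+```yaml
+# Illustrative only: the conditionType value is a placeholder, not the verified Kuperator constant.
+apiVersion: v1
+kind: Pod
+spec:
+  readinessGates:
+  - conditionType: kusionstack.io/service-ready   # placeholder condition type
+status:
+  conditions:
+  - type: kusionstack.io/service-ready            # set to "False" during Preparing/Operating
+    status: "True"
+```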
+ +The PodOpsLifecycle detail and the relationship with Kubernetes native Pod Lifecycle is showed by following sequence diagram. + +![pod-ops-lifecycle-sequence-diagram](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png) + +## Developer's Guide + +This section introduces how to develop operation controllers and cooperation controllers to interact with PodOpsLifecycle. +- The operation controller is responsible for a set of Pod operation tasks. KusionStack Kuperator has already provided various types of operation controllers. Users only need to develop a new operation controller if a new kind of Pod operation needs to be added. +- The cooperation controller participates in PodOpsLifecycle before and after operating on a Pod, such as the Traffic controller, alert monitoring controller, and other controllers responsible for maintaining the Pod and application status. Users should develop a new cooperation controller only when there is a new type of service or status around the Pod that needs to be maintained, such as integrating with a new traffic provider. + +### Operation Controller + +The operation controller is responsible for Pod operations. The tasks that an operation controller needs to perform during PodOpsLifecycle include triggering a PodOpsLifecycle, checking whether the Pod has entered the Operating phase, performing Pod operations, and marking Pod operations as finished. These actions interacting with PodOpsLifecycle are provided in the package `kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle/utils.go`. + +A simple operation controller reconcile method would look like this: + +```go +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/client" + + "kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle" +) + +var operationAdapter = &OperationOpsLifecycleAdapter{} + +type OperationOpsLifecycleAdapter struct { +} + +// GetID indicates ID of the PodOpsLifecycle +func (a *OperationOpsLifecycleAdapter) GetID() string { + return "new-id" +} + +// GetType indicates type for this Operation Controller +func (a *OperationOpsLifecycleAdapter) GetType() podopslifecycle.OperationType { + return "new-type" +} + +// AllowMultiType indicates whether multiple IDs which have the same Type are allowed +func (a *OperationOpsLifecycleAdapter) AllowMultiType() bool { + return true +} + +// WhenBegin is a hook, which will be executed when begin a lifecycle +func (a *OperationOpsLifecycleAdapter) WhenBegin(pod client.Object) (bool, error) { + return false, nil +} + +// WhenFinish is a hook, which will be executed when finish a lifecycle +func (a *OperationOpsLifecycleAdapter) WhenFinish(pod client.Object) (bool, error) { + return false, nil +} + +...... 
+func (r *PodOperationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+    // get the Pod
+    pod := &corev1.Pod{}
+    if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+        if !errors.IsNotFound(err) {
+            return reconcile.Result{}, err
+        }
+        return reconcile.Result{}, nil
+    }
+
+    // check if the Pod needs operation
+    if !r.needOperation(pod) {
+        return reconcile.Result{}, nil
+    }
+
+    // if PodOpsLifecycle has not been triggered, trigger it
+    if !podopslifecycle.IsDuringOps(operationAdapter, pod) {
+        if updated, err := podopslifecycle.Begin(r, operationAdapter, pod); err != nil {
+            return reconcile.Result{}, err
+        } else if updated {
+            return reconcile.Result{}, nil
+        }
+    }
+
+    // wait until the Pod enters the operating phase
+    if _, allowed := podopslifecycle.AllowOps(operationAdapter, 0, pod); !allowed {
+        return reconcile.Result{}, nil
+    }
+
+    // do the operation work
+    if completed := r.doPodOperation(pod); !completed {
+        return reconcile.Result{}, nil
+    }
+
+    // after the operation work is completed, finish the operating phase to continue PodOpsLifecycle
+    if _, err := podopslifecycle.Finish(r, operationAdapter, pod); err != nil {
+        return reconcile.Result{}, err
+    }
+    return reconcile.Result{}, nil
+}
+```
+
+### Pod Cooperation Controller
+
+There are two ways to develop a cooperation controller.
+One way is to develop a controller with controller-runtime that adheres to some conventions of PodOpsLifecycle and Kubernetes.
+Another way is to use the [ResourceConsist](https://github.com/KusionStack/resourceconsist) framework provided by KusionStack; see its [documentation](https://www.kusionstack.io/docs/kuperator/manuals/resourceconsist) for details.
+
+The following outlines the first approach.
+
+```go
+import (
+    "context"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/errors"
+    k8spod "k8s.io/kubernetes/pkg/api/v1/pod"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+    appsv1alpha1 "kusionstack.io/kuperator/apis/apps/v1alpha1"
+)
+
+const (
+    // Finalizer needs to have prefix: `prot.podopslifecycle.kusionstack.io`.
+    // KusionStack Kuperator keeps this prefix backward compatible,
+    // so that it can be hard-coded without coupling to KusionStack Kuperator internals.
+    finalizerPrefix = appsv1alpha1.PodOperationProtectionFinalizerPrefix
+
+    protectionFinalizer = finalizerPrefix + "/" + "unique-id"
+)
+
+......
+func (r *PodResourceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+    // get the Pod
+    pod := &corev1.Pod{}
+    if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+        if !errors.IsNotFound(err) {
+            return reconcile.Result{}, err
+        }
+        return reconcile.Result{}, nil
+    }
+
+    if k8spod.IsPodReady(pod) {
+        // do resource reconciliation, e.g. add the Pod IP to the traffic route
+        r.trafficOn(pod.Status.PodIP)
+        // It is important to add a unique finalizer on this Pod
+        return reconcile.Result{}, r.addFinalizer(ctx, pod, protectionFinalizer)
+    }
+
+    // the Pod is not ready: do resource reconciliation, e.g. remove the Pod IP from the traffic route
+    r.trafficOff(pod.Status.PodIP)
+    // It is important to remove the unique finalizer from this Pod
+    return reconcile.Result{}, r.removeFinalizer(ctx, pod, protectionFinalizer)
+}
+
+func (r *PodResourceReconciler) addFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+    if controllerutil.ContainsFinalizer(pod, finalizer) {
+        return nil
+    }
+
+    controllerutil.AddFinalizer(pod, finalizer)
+    return r.Update(ctx, pod)
+}
+
+func (r *PodResourceReconciler) removeFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+    if !controllerutil.ContainsFinalizer(pod, finalizer) {
+        return nil
+    }
+
+    controllerutil.RemoveFinalizer(pod, finalizer)
+    return r.Update(ctx, pod)
+}
+```
+
+## Key Features
+
+### Concurrency Support
+
+PodOpsLifecycle in KusionStack Kuperator supports concurrency.
+This means PodOpsLifecycle can organize and track multiple controllers operating on the same Pod at the same time.
+For example, while one controller is updating a Pod, other controllers are allowed to perform other operations on it at the same time, such as deleting, restarting, or recreating it,
+although the combined result may not be meaningful.
+
+### General Workload Support
+
+PodOpsLifecycle offers seamless integration with various workload types, including Deployment and StatefulSet.
+To enable this functionality, ensure the feature gate for `GraceDeleteWebhook` is enabled when starting the KusionStack Kuperator controller:
+
+```shell
+# Enable the GraceDeleteWebhook feature when starting the controller with this argument
+$ /manager --feature-gates=GraceDeleteWebhook=true
+```
+
+Once enabled, any Pod labeled with `kusionstack.io/control=true` under a general workload, such as Deployment, becomes manageable by PodOpsLifecycle.
+This feature gives such workloads a way to work more closely with Pod cooperation controllers.
+
+> Due to the Kubernetes webhook mechanism, the following error will be returned when workloads or users delete a pod. This error is intentional and serves to indicate that the pod deletion process has started and is being managed by PodOpsLifecycle.
+> ```shell
+> $ kubectl -n default delete pod collaset-sample-74fsv
+> Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle
+> ```
\ No newline at end of file
diff --git a/kuperator_versioned_docs/version-v0.5/introduction/_category_.json b/kuperator_versioned_docs/version-v0.5/introduction/_category_.json
new file mode 100644
index 00000000..537bad9b
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.5/introduction/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Introduction",
+  "position": 0
+}
diff --git a/kuperator_versioned_docs/version-v0.5/introduction/introduction.md b/kuperator_versioned_docs/version-v0.5/introduction/introduction.md
new file mode 100644
index 00000000..5adf228c
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.5/introduction/introduction.md
@@ -0,0 +1,49 @@
+# What is KusionStack Kuperator?
+
+KusionStack Kuperator consists of workloads and operators built on Kubernetes Custom Resource Definitions,
+with a primary aim of bridging the gap between platform development and Kubernetes.
+
+By keeping more operational work inside the Kubernetes layer,
+KusionStack Kuperator reduces the complexity of interacting with Kubernetes
+and enhances convenience for platform developers.
+
+## Key features
+
+KusionStack Kuperator currently provides the following features,
+streamlining application operations when developing platforms based on Kubernetes:
+
+### Fine-grained operation
+
+KusionStack Kuperator introduces PodOpsLifecycle to extend the native Pod lifecycle with additional phases such as PreCheck, Preparing, etc.
+All operators within KusionStack Kuperator respect PodOpsLifecycle,
+so that PodOpsLifecycle can orchestrate all of these operators to operate on each Pod in a coordinated way.
+
+### Advanced workloads
+
+KusionStack Kuperator offers several workloads that make it convenient and effective to deliver and operate application resources.
+
+Currently, Kuperator provides the CollaSet workload.
+Besides the basic ability to scale and update Pods, as Kubernetes Deployment and StatefulSet do,
+CollaSet also provides a range of scale and update strategies,
+such as in-place update of container images and Pod revision consistency.
+
+### Streamlined Pod Operation
+
+KusionStack Kuperator introduces the ResourceConsist framework, which offers a graceful way
+to integrate resource management around Pods, including traffic control, into the PodOpsLifecycle.
+This simplifies the work for platform developers dealing with Pod operation details.
+KusionStack also integrates some resources by default, such as Aliyun SLB.
+
+### Risk management
+
+Building upon the PodOpsLifecycle, KusionStack Kuperator introduces a workload named PodTransitionRule,
+which keeps the risks of Pod operations under control.
+By providing a MaxUnavailable rule similar to Kubernetes' PodDisruptionBudget (PDB),
+it ensures there are always enough Pods available for service.
+Furthermore, it allows for custom rules through extension via webhooks and label hooks.
+
+## Future works
+
+The KusionStack Kuperator project is currently in its early stages.
+Our goal is to simplify platform development. We will continue building in areas such as application operations,
+observability, and insight.
We hope the Kuperator will make it easier for you to build platforms. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.5/manuals/_category_.json b/kuperator_versioned_docs/version-v0.5/manuals/_category_.json new file mode 100644 index 00000000..795f138a --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/manuals/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Manuals", + "position": 4 +} diff --git a/kuperator_versioned_docs/version-v0.5/manuals/collaset.md b/kuperator_versioned_docs/version-v0.5/manuals/collaset.md new file mode 100644 index 00000000..6e1aaddf --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/manuals/collaset.md @@ -0,0 +1,996 @@ +--- +sidebar_position: 1 +--- + +# CollaSet +CollaSet is responsible for managing a set of Pods. Similar to Kubernetes Deployment and StatefulSet, it also supports scaling and updating Pods. Additionally, CollaSet offers advanced features to provide users with more granular control over managing Pods. + +A basic CollaSet configuration is represented in the following YAML format: + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` +Let's explore the features of CollaSet. + +## Basic Features +### Scaling Pods +CollaSet utilizes the field spec.replicas to indicate the number of Pods under management. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 3 # indicate the number of Pods to manage + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +... +``` +Pods can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-85q7g 1/1 Running 0 57s +collaset-sample-vx5ws 1/1 Running 0 57s +collaset-sample-hr7pv 1/1 Running 0 57s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +By default, CollaSet always creates new Pods using the latest template specified in `spec.template`. CollaSet establishes ownership over a set of Pods through the label selector defined in `spec.selector`. Thus, it's important to ensure that the labels provided in `spec.selector` match those in `spec.template.metadata.labels`. + +CollaSet status provides general information about this CollaSet and all Pods under it. + +``` shell +$ kubectl -n default get cls collaset-sample -o yaml +...... +status: + availableReplicas: 3 + collisionCount: 0 + conditions: + - lastTransitionTime: "2023-09-01T03:56:09Z" + reason: Updated + status: "True" + type: Update + currentRevision: collaset-sample-6d7b7c58f + observedGeneration: 1 + operatingReplicas: 0 + readyReplicas: 3 + replicas: 3 + scheduledReplicas: 3 + updatedAvailableReplicas: 3 + updatedReadyReplicas: 3 + updatedReplicas: 3 + updatedRevision: collaset-sample-6d7b7c58f +``` + +Some fields in CollaSet status are explained here: + +`updatedRevision` indicates the latest revision that CollaSet uses to create or update Pods. 
+ +`currentRevision` indicates the last updated revision. It will be set to updatedRevision after all Pods are updated, and their PodReady conditions become True. + +`replicas` indicates the count of Pods under this CollaSet. + +`scheduledReplicas` indicates the count of Pods under this CollaSet that successfully got scheduled. + +`availableReplicas` indicates the count of Pods under this CollaSet that have all expected finalizers attached. + +`updatedReplicas` indicates the count of Pods under this CollaSet that have the updated revision. + +`updatedReadyReplicas` indicates the count of Pods under this CollaSet that are counted in `updatedReplicas` and have their PodReady conditions set to True. + +`updatedAvailableReplicas` indicates the count of Pods under this CollaSet that is counted in `updatedReadyReplicas` and have all expected finalizers attached. + +### Updating Pods +CollaSet generates Pods according to the pod template described in `spec.template`. This template can be updated to signal CollaSet to update each owned Pod: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: +...... +spec: +...... + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 +...... +``` + +CollaSet immediately updates all Pods it owns with the new Pod template by default. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +The update progress can be controlled using partition. + +#### Partition +Similar to StatefulSet, `partition` is used to control the upgrade progress. + +By default, if not indicated, all Pods will be updated when spec.template changes. The `partition` can be adjusted from `spec.replicas` to 0 to specify the number of pods in old revisions. + +Let's update the image back to nginx:1.25.2: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.25.2 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 2 # use partition to control upgrade progress +``` + +In this case, CollaSet only updates 1 Pod to the updated revision. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.25.2 # only 1 Pod updated + - image: nginx:1.24.0 +``` +#### Update by Label +By configuring the `byLabel` rolling update policy, users can precisely specify which Pods they want to update by using labels. + +If you go back to the sample in the [section Partition](#Partition) and change `byPartition` to `byLabel` like the following: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + rollingUpdate: +- byPartition: +- partition: 1 ++ byLabel: {} +``` + +Subsequently, each Pod will only be updated if it's marked with the label `collaset.kusionstack.io/update-included`. + +## Advanced Features +### Pod Instance ID +Each Pod created by CollaSet has a unique ID held by the label `collaset.kusionstack.io/instance-id`, which can be used to identify each individual Pod. + +``` yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + collaset.kusionstack.io/instance-id: "0" # Pod instance ID +... 
+``` + +CollaSet provides a context to specify an ID pool, which defaults to the same name as the CollaSet and is immutable. + +``` yaml +... +spec: + scaleStrategy: + context: +``` + +The same ID pool name can be indicated for multiple CollaSets, allowing them to share a single ID pool. Consequently, each Pod created by these CollaSets will be assigned a unique ID. + +For example, these are two CollaSets with the same context: + +``` shell +$ cat ~/sample.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-a +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +--- + +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-b +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` + +Then create these CollaSets with the sample file: + +``` shell +$ kubectl -n default apply -f ~/sample.yaml +collaset.apps.kusionstack.io/collaset-sample-a created +collaset.apps.kusionstack.io/collaset-sample-b created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-a-g4sjj 1/1 Running 0 42s +collaset-sample-a-ph9vc 1/1 Running 0 42s +collaset-sample-b-fqkq4 1/1 Running 0 42s +collaset-sample-b-lqg8f 1/1 Running 0 42s + +$ kubectl -n default get pod -o yaml | grep collaset.kusionstack.io/instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "3" + collaset.kusionstack.io/instance-id: "2" +``` + +Now, the 4 Pods created by these 2 CollaSets will have a unique instance ID. + +### Revision Consistency +Pods within a CollaSet can utilize more than two different Pod templates simultaneously, including both the current and updated revisions. This can result from partial updates. To ensure the stability of Pod revisions over time, CollaSet records this information. When a Pod is deleted, CollaSet recreates it using its previous revision. + +It can be reproduced by following steps: + +1. Provision a new CollaSet with replicas 3. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 4s +collaset-sample-glgnb 1/1 Running 0 4s +collaset-sample-qs46r 1/1 Running 0 4s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +2. Update the image of PodTemplate of the CollaSet to image nginx:1.24.0 and set the partition to 2. Then there will be 2 Pods with image nginx:1.24.0 and 1 Pod with image nginx:1.25.2. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 2 # update 2 Pods + +# Wait until these 2 Pods are updated, and check the Pod's images. 
+$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +3. Update the image of PodTemplate of the CollaSet to image nginx:1.23.4 and set the partition to 1. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.23.4 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # update 1 Pod + +# Wait until the Pod is updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # Pod collaset-sample-qs46r + - image: nginx:1.23.4 +``` + +Now, there are 3 Pods, each of which has an individual image. If we then delete the Pod with the image nginx:1.24.0, the new Pod replacing it will be created with the same image nginx:1.24.0 in order for the Pod to inherit the revision. + +``` shell +$ kubectl delete -n default delete pod collaset-sample-qs46r +pod "collaset-sample-qs46r" deleted + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 3h +collaset-sample-ht9x6 1/1 Running 0 2m27s # Pod recreated +collaset-sample-qs46r 1/1 Running 1 (3h ago) 3h + +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # image has not been changed + - image: nginx:1.23.4 +``` + +### In-Place Update Pod +In addition to the `Recreate` update policy, which is identical to Deployment and StatefulSet, CollaSet offers the `InPlaceIfPossible` update policy. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # Options: InPlaceIfPossible, Recreate, Replace +``` + +`InPlaceIfPossible` is the default value, which instructs CollaSets to try to update Pods in place when only container images, labels, and annotations have changed. If some other fields have changed too, the policy will back off to the `Recreate` policy. + +`Recreate` indicates CollaSets always delete the old Pod and create a new one with an updated revision. + +If update pod template with `InPlaceIfPossible` policy as following example, the Pod will not be recreated. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # use InPlaceIfPossible policy + +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5wvlh 1/1 Running 1 (6s ago) 2m10s +collaset-sample-ldvrg 1/1 Running 1 (6s ago) 2m10s +collaset-sample-pbz75 1/1 Running 1 (6s ago) 2m10s +``` + +### Replace Update Pod + +CollaSet provides the `Replace` policy for certain applications that are sensitive to the available number of Pods. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + podUpgradePolicy: Replace # Options: InPlaceIfPossible, Recreate, Replace +``` + +The `Replace` policy indicates that CollaSet should update a Pod by creating a new one to replace it. 
+Unlike the `Recreate` policy, which deletes the old Pod before creating a new updated one, or the `InPlaceIfPossible` policy, which updates the current Pod in place,
+the `Replace` policy first creates a new Pod with the updated revision. It then deletes the old Pod once the new one becomes available for service.
+
+```shell
+# Before updating CollaSet
+$ kubectl -n default get pod
+NAME                    READY   STATUS    RESTARTS   AGE
+collaset-sample-dwkls   1/1     Running   0          6m55s
+
+# After updating CollaSet, the updated Pod is created first
+$ kubectl -n default get pod
+NAME                    READY   STATUS              RESTARTS   AGE
+collaset-sample-dwkls   1/1     Running             0          6m55s
+collaset-sample-rcmbv   0/1     ContainerCreating   0          0s
+
+# Once the created Pod is available for service, the old Pod will be deleted
+$ kubectl -n default get pod
+NAME                    READY   STATUS        RESTARTS   AGE
+collaset-sample-rcmbv   1/1     Running       0          1s
+collaset-sample-dwkls   1/1     Terminating   0          7m12s
+```
+
+The two Pods will have a pair of labels to identify their relationship. The new Pod will have the label `collaset.kusionstack.io/replace-pair-origin-name` to indicate the name of the old Pod, and the old Pod will have the label `collaset.kusionstack.io/replace-pair-new-id` to indicate the instance ID of the new Pod.
+
+Additionally, the new Pod and the old Pod will each begin their own PodOpsLifecycle, independent of each other.
+
+### OperationDelaySeconds
+Since v0.5.0, CollaSet supports the `operationDelaySeconds` setting, which controls how long to wait between traffic being switched off and the container shutdown, in order to protect long-lived connection requests.
+``` yaml
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: CollaSet
+metadata:
+  name: collaset-sample
+spec:
+  scaleStrategy:
+    operationDelaySeconds: 60 # duration between traffic off and container shutdown
+```
+
+In Pod operations, including scaling down, updates, and deletions, [PodOpsLifecycle](https://www.kusionstack.io/docs/kuperator/concepts/podopslifecycle) is responsible for managing the full lifecycle of Pods (e.g., `ServiceAvailable`, `Preparing`, `Operating` and `Completing`).
+These operations involve shutting down or restarting containers.
+
+Shutting down containers immediately after traffic is switched off can leave long-lived connection requests unanswered.
+Instead, it is preferable to wait for some time after traffic is switched off before closing the containers, so that the remaining long-lived requests can be handled.
+
+#### Comparing to terminationGracePeriodSeconds
+The `operationDelaySeconds` setting serves a purpose similar to `terminationGracePeriodSeconds` on a Pod.
+The difference is that, since `operationDelaySeconds` is configured in the CollaSet spec, modifying it will not trigger a Pod upgrade.
+
+![operation-delay-seconds](/img/kuperator/manuals/collaset/operation-delay-seconds.png)
+
+Note that if both `operationDelaySeconds` and `terminationGracePeriodSeconds` are configured, the container may not be shut down until (operationDelaySeconds + terminationGracePeriodSeconds) after traffic is switched off.
+
+### Selective Pod Deletion
+When scaling down a CollaSet, users may want to delete specific Pods instead of letting Pods be chosen at random.
+CollaSet supports specifying a set of Pod names in `spec.scaleStrategy.podToDelete` to recreate or scale in those Pods.
+ +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: sample +spec: + replicas: 2 + scaleStrategy: + podToDelete: # replace or scaleIn listed pods + - podName1 + - podName2 + # ... +``` +When user specifies a set of pods: +1. On the one hand, if ```replicas``` is scaled down simultaneously, CollaSet will scale down pods listed in ```podToDelete``` first. +2. On the other hand, if ```replicas``` is not scale down, pods listed in ```podToDelete``` will be recreated, and new pods will inherit origin pods' ```instance-id```. + +Note that, by default, controller will clear the pod name once CollaSet cannot find the pod specified in ```podToDelete```. +Users can disable clear pod name after pod deletion by disabling the feature ```ReclaimPodToDelete``` to false (the default value is true). + +```shell +# Disable the ReclaimPodToDelete feature when starting the controller with this argument +$ /manager --feature-gates=ReclaimPodToDelete=false +``` + +### Recreate And Replace Pod by Label + +In practice, users often need to recreate or replace specified Pods under a CollaSet. + +To delete a Pod, users can simply call the Kubernetes API, like executing `kubectl delete pod `. +However, this will bypass the [PodOpsLifecycle](https://www.kusionstack.io/docs/kuperator/concepts/podopslifecycle) Mechanism. +We provide following two options: + +1. Enable the feature `GraceDeleteWebhook` so that it is possible to delete Pods through `PodOpsLifecycle`. +```shell +# Enable the GraceDeleteWebhook feature when starting the controller with this argument +$ /manager --feature-gates=GraceDeleteWebhook=true +``` +```shell +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-vqccr 1/1 Running 0 21s + +# Delete the pod directly. A message will respond indicating that the Pod deletion is handled by PodOpsLifecycle +kubectl -n default delete pod collaset-sample-vqccr +Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle + +# The old Pod is deleted, and a new Pod will be created +$ kubectl -n default get pod -w +collaset-sample-vqccr 1/1 Running 0 71s +collaset-sample-vqccr 1/1 Terminating 0 71s +...... +collaset-sample-nbl6t 0/1 Pending 0 0s +collaset-sample-nbl6t 0/1 ContainerCreating 0 0s +...... +collaset-sample-nbl6t 1/1 Running 0 0s +``` +2. Label the Pod with `podopslifecycle.kusionstack.io/to-delete`, so that CollaSet will delete the Pod through PodOpsLifecycle. + +```shell +# Label Pod +$ kubectl -n default label pod collaset-sample-nbl6t podopslifecycle.kusionstack.io/to-delete=true + +# The old Pod is deleted, and a new Pod will be recreated +$ kubectl -n default get pod -w +collaset-sample-nbl6t 1/1 Running 0 5m28s +collaset-sample-nbl6t 1/1 Terminating 0 5m28s +...... +collaset-sample-w6x69 0/1 Pending 0 0s +...... +collaset-sample-w6x69 0/1 ContainerCreating 0 0s +...... +collaset-sample-w6x69 1/1 Running 0 2s +``` + +Recreating a Pod will delete the old Pod first and then create a new one. This will affect the available Pod count. +To avoid this, CollaSet provides a feature to replace Pods by labeling them with `podopslifecycle.kusionstack.io/to-replace`. 
+ +```shell +# Replace Pod by label +$ kubectl -n echo label pod collaset-sample-w6x69 podopslifecycle.kusionstack.io/to-replace=true + +# The old Pod is deleted, and a new Pod will be created +$ kubectl -n default get pod -w +collaset-sample-w6x69 1/1 Running 0 5m29s +collaset-sample-74fsv 0/1 Pending 0 0s +collaset-sample-74fsv 0/1 ContainerCreating 0 0s +...... +collaset-sample-74fsv 1/1 Running 0 2s +...... +collaset-sample-w6x69 0/1 Terminating 0 5m33s +``` + + +### Supporting PVCs +CollaSet introduces support for PVCs, allowing user to declare `VolumeClaimTemplates` to create PVCs for each Pod. +Furthermore, in response to common issues with PVCs management, such as high modification costs and difficult control, CollaSet extends its functionality with the following advantages vs. StatefulSet: + +1. Support update, add and delete on `volumeClaimTemplates`. +2. Provide control over PVC lifecycle. + +#### Provision PVCs +The `collaset-pvc.yaml` file declares a CollaSet with `VolumeClaimTemplates` to provision a PVC with `1Gi` storage for each Pod. +These PVCs are then mounted on the container at the path `/path/mount/www`. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi +``` + +Pods and PVCs can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-pw5lg 1/1 Running 0 4s +foo-5n6ts 1/1 Running 0 4s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-h5zv7 Bound pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6 1Gi RWO standard 7s +foo-www-lswp2 Bound pvc-9564b44b-9c99-467b-abee-4285183ff9c3 1Gi RWO standard 7s +``` + +Each Pod and its related PVC have the same value of label `collaset.kusionstack.io/instance-id`. + +``` shell +$ kubectl -n default get pod -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "0" + +$ kubectl -n default get pvc -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "0" +``` + +#### Update PVCs +To save the operating costs of PVCs, i.e. expand storage capacity, CollaSet supports update, add and delete on `volumeClaimTemplates`. + +To achieve this, for each PVC, CollaSet calculates a hash value based on its template, and attatch it to label `collaset.kusionstack.io/pvc-template-hash`. +Once users modify the templates, CollaSet recognizes, caculates a new hash value and attach it on new PVCs to replace old ones. + +Let's give it a try, update the storage of PVC template from `1Gi` to `2Gi`. +``` shell +$ kubectl -n default edit cls foo + ...... + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeModes: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: +- storage: 1Gi ++ storage: 2Gi # update pvc template to expand storage +...... +``` + +There are 2 new PVCs with `2Gi` storage created with different hash values. 
+ +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-pw5lg 1/1 Terminating 0 7s +foo-5n6ts 1/1 Terminating 0 7s +foo-9nhz4 0/1 Pending 0 1s +foo-xb2gd 0/1 Pending 0 1s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-h5zv7 Terminating pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6 1Gi RWO standard 11s +foo-www-lswp2 Terminating pvc-9564b44b-9c99-467b-abee-4285183ff9c3 1Gi RWO standard 11s +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 5s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 5s + +$ kubectl -n default get pvc -o yaml | grep pvc-template-hash + collaset.kusionstack.io/pvc-template-hash: 594d8857f9 # hash value of old pvc + collaset.kusionstack.io/pvc-template-hash: 594d8857f9 + collaset.kusionstack.io/pvc-template-hash: d78c5ff6b # hash value of new pvc + collaset.kusionstack.io/pvc-template-hash: d78c5ff6b +``` + +For old Pvcs, users can retain them by configuring `whenScaled` policy to `Retain` . +Then old PVCs can be re-mount on its related Pod after rolling back. +Otherwise, old PVCs can be deleted by default policy `Delete`. + + +#### Add PVCs +Add a PVC template `yyy`, which is mounted on the container at the path `/path/mount/yyy`. + +``` shell +$ kubectl -n default edit cls foo +...... + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www ++ - mountPath: /path/mount/yyy # path to mount PVC ++ name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi ++ - metadata: # added pvc template ++ name: yyy ++ spec: ++ storageClassName: standard ++ volumeMode: Filesystem ++ accessModes: [ "ReadWriteOnce" ] ++ resources: ++ requests: ++ storage: 2Gi +``` + +Now, each pod has two PVCs, which include a new PVCs claimed by template `yyy` and one old PVC claimed by template `www`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-8wwsz 0/1 Pending 0 1s +foo-9nhz4 1/1 Terminating 0 23s +foo-hd2cv 0/1 Pending 0 1s +foo-xb2gd 1/1 Terminating 0 23s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 25s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 25s +foo-yyy-c68nh Bound pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 3s # new pvc +foo-yyy-vpwss Bound pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 3s # new pvc +``` + +#### Delete PVCs +Delete the PVC template `yyy` on CollaSet. + +``` shell +$ kubectl -n default edit cls foo +...... 
+ spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www +- - mountPath: /path/mount/yyy # path to mount PVC +- name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi +- - metadata: # delete pvc template +- name: yyy +- spec: +- storageClassName: standard +- volumeMode: Filesystem +- accessModes: [ "ReadWriteOnce" ] +- resources: +- requests: +- storage: 2Gi +``` + +Now, PVCs claimed by template `yyy` are deleted and the origin PVCs claimed by template `www` are retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-6qcpc 1/1 Running 0 2s +foo-z2jqv 1/1 Running 0 2s +foo-8wwsz 1/1 Terminating 0 38s +foo-hd2cv 1/1 Terminating 0 38s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 61s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 61s +foo-yyy-c68nh Terminating pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 39s +foo-yyy-vpwss Terminating pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 39s +``` + +#### PVC Retention Policy +CollaSet provides control over PVC lifecycle by configuring `spec.persistentVolumeClaimRetentionPolicy`. +Users can retain or delete PVCs after its related Pod is scaled down or CollaSet is deleted, respectively. +This feature is also supported by [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) since v1.27. +Basic rule is detailed as follows: +- `whenScale` : decides to delete or retain PVCs after Pod is scaled down. +- `whenDeleted`: decides to delete or retain PVCs after CollaSet is deleted. + +For each policy users can set the value to either Delete (by default) or Retain. +Note that for StatefulSet, the default policy is Retain. + +#### whenScaled +Apply `collaset-pvc.yaml` and edit foo to scale replicas to 1. +``` shell +$ kubectl apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl edit cls foo + ...... + spec: +- replicas: 2 ++ replicas: 1 # scale in 1 pod + selector: + matchLabels: + app: foo + ...... +``` +As the `whenScaled` is not configured, thus its value is `Delete` by default. +Consequently, PVC `foo-www-wzwbq` is deleted as its related Pod `foo-tkc5m` is scaling down. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-tkc5m 0/1 Terminating 0 27s # related pvc is terminating +foo-vwtcm 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-wzwbq Terminating pvc-b92c28c6-59ad-4976-810c-8d538c4a22c6 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 29s +``` + +Set `Retain` to `whenScaled`, and scale replicas to 0. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 1 ++ replicas: 0 # scale in 1 pod + selector: + matchLabels: + app: foo ++ scaleStrategy: ++ persistentVolumeClaimRetentionPolicy: ++ whenScaled: Retain # retain the pvc after pod is scaled down + ...... 
+``` + +Pod `foo-vwtcm` is terminating, while its related PVC `foo-www-r4vlh` is retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-vwtcm -n default 1/1 Terminating 0 62s # related pvc is retained + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 63s +``` + +To validate the retention policy, try ro scale replicas to 2, and the remaining PVC should be mounted again. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 0 ++ replicas: 2 # scale out 2 pods + ...... +``` + +We can see that PVC `foo-www-r4vlh` is retained by Pod `foo-px487` as they have the same `instance-id`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-ld5xc 1/1 Running 0 27s +foo-px487 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 2m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` + +#### whenDelete +Edit `foo` to configure `Retain` policy for `whenDelete`, and then delete this CollaSet. +``` shell +$ kubectl -n default edit cls foo + ...... + scaleStrategy: + persistentVolumeClaimRetentionPolicy: + whenScaled: Retain ++ whenDelete: Retain # retain the pvc after collaset is deleted + ...... +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default delete cls foo +collaset.apps.kusionstack.io "foo" deleted +``` + +Now, try to recreate `foo` with 2 replicas, and the result shows both PVCs are retained. +``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-qhh8t 1/1 Running 0 2s +foo-ss255 1/1 Running 0 2s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 4m29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 6m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" # pvc foo-www-d48gx is retained + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.5/manuals/poddecoration.md b/kuperator_versioned_docs/version-v0.5/manuals/poddecoration.md new file mode 100644 index 00000000..9c3114f8 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/manuals/poddecoration.md @@ -0,0 +1,401 @@ +--- +sidebar_position: 4 +--- + +# PodDecoration +PodDecoration works in conjunction with CollaSet to selectively inject specific configurations to Pods that meet certain criteria. 
+ +PodDecoration not only allows injecting sidecar containers to Pods but also enables modifying existing container configurations, metadata, and scheduling parameters etc. +The PodDecoration controller does not control the upgrade of Pods. The actual upgrade process is fully controlled by the CollaSet controller. This means that the injection upgrade of PodDecoration can also be performed `InPlaceIfPossible`. + +About [CollaSet](collaset.md). +# Example + +## 1. Create CollaSet + +```yaml +# collaset.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: foo +``` +Use `collaset.yaml` to create three pods under CollaSet `foo` management. +```shell +$ kubectl apply -f collaset.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl get cls +NAME DESIRED CURRENT AVAILABLE UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +foo 3 3 3 3 3 3 foo-7bdb974bc7 foo-7bdb974bc7 7s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2wnnf 1/1 Running 0 41s +foo-hqpx7 1/1 Running 0 41s +foo-mqt48 1/1 Running 0 41s +``` +## 2. Create PodDecoration + +The following `poddecoration.yaml` file describes a PodDecoration, which selects the pod under CollaSet `foo` and injects the content in `template` into the pod with `instance-id=0`. + +```yaml +# poddecoration.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + selector: # selected pod range in which PodDecoration takes effect + matchLabels: + app: foo + updateStrategy: + rollingUpdate: + selector: # select pod to upgrade in effect range + matchLabels: + collaset.kusionstack.io/instance-id: "0" + template: + metadata: + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v1" + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + command: ["sleep", "2h"] + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` + +Create PodDecoration `sample-pd` to upgrade selected pod +```shell +$ kubectl apply -f poddecoration.yaml +poddecoration.apps.kusionstack.io/sample-pd created +``` +The status of PodDecoration is updated, and one pod is injected with sidecar through recreate. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 1 1 1 sample-pd-9465f4c84 20s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 15s +foo-2wnnf 1/1 Running 0 2m +foo-hqpx7 1/1 Running 0 2m + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-2wnnf + escaped: true + - name: foo-hqpx7 + escaped: true + matchedPods: 3 + injectedPods: 1 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + updatedRevision: sample-pd-9465f4c84 +``` + +## 3. Update PodDecoration + +### 3.1. Rolling update v1 + +Edit `sample-pd` to expand the upgrade scope. +```shell +$ kubectl edit pd sample-pd +``` + +```yaml +# poddecoration.yaml +# Edit updateStrategy to select instance-id in [0, 1, 2] +... +spec: + ... 
+ updateStrategy: + rollingUpdate: + selector: + matchExpressions: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + - "1" # add + - "2" # add + template: + ... +``` + +All pods updated. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 3 3 sample-pd-9465f4c84 sample-pd-9465f4c84 3m + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 3m +foo-lftw8 2/2 Running 0 8s +foo-n57rr 2/2 Running 0 8s + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + currentRevision: sample-pd-9465f4c84 + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 3 + updatedReadyPods: 3 + updatedAvailablePods: 3 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-9465f4c84 +``` +### 3.2. Rolling update v1 -> v2 + + +Update `sample-pd`'s sidecar container image and `updateStrategy`. +```shell +$ kubectl edit pd sample-pd +``` +```yaml +# poddecoration.yaml +# Update sidecar-a's image with ubuntu:22.10 +# Edit updateStrategy to select instance-id in [0] +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + ... + updateStrategy: + rollingUpdate: + selector: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + template: + ... + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.10 + ... +``` +Pod `foo-2gnnl` in-place upgrade sidecar container image. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 1 1 sample-pd-9465f4c84 sample-pd-8697d4bf8c 6min + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 1 (12s ago) 6m +foo-lftw8 2/2 Running 0 3min +foo-n57rr 2/2 Running 0 3min + +$ kubectl get pod foo-2gnnl -o yaml | grep "image: ubuntu" + image: ubuntu:22.10 + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-8697d4bf8c + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-8697d4bf8c +``` + + +# Features + +## Injection + +### Metadata +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + template: + metadata: + - patchPolicy: MergePatchJson + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"sample-pd","version":"v2"}]' + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-name: sample-pd +``` +`patchPolicy` is the injected policy, as follows: +- `Retain`: The original value of annotations and labels will be retained. +- `Overwrite`: The value of annotations and labels corresponding to the existing key will be overwritten. +- `MergePatchJson`: It only takes effect for annotation. If the key does not exist, the value will be written directly. Otherwise, the json value will be merged. 
+ +For example: +```yaml +# Old pod metadata +metadata: + labels: + custom.io/sidecar-version: "v1" + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}]' + +# After metadata injected +metadata: + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-type: sample-pd + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}, {"name":"sample-pd","version":"v2"}]' +``` +### Primary Container + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + template: + primaryContainers: + - targetPolicy: ByName + name: foo + image: foo:v2 + env: + - name: APP_NAME + value: foo + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` +Injection into the primary containers only supports limited fields: `image`, `env` and `volumeMounts`. + +`targetPolicy` indicates which existed container these configuration should inject into, as follows: +- `ByName`: Only inject containers matching `name`. +- `All`: Inject all primary containers. +- `First`: Inject into first primary container. +- `Last`: Inject into last primary container. + +### Sidecar Container + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + template: + containers: + - injectPolicy: AfterPrimaryContainer # Container injected policy, AfterPrimaryContainer or BeforePrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + ... +``` +Inject a new sidecar container. Optional, it can be placed in front or behind the primary container. +### InitContainer + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + template: + initContainers: + - name: init + image: custom-init-image:v1 + ... +``` + +## Upgrade strategy + +### selector +You can use `selector` to select the pod. The `CollaSet` provides a unique `instance-id` for each pod. Of course, custom labels can also be used to label pods for triggering upgrades. +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + updateStrategy: + rollingUpdate: + selector: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" +``` + +### partition +Partition is the desired number or percent of Pods in **old revisions**, defaults to `0`. +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: poddecoration +spec: + # ... + updateStrategy: + rollingUpdate: + partition: 2 # int number +``` diff --git a/kuperator_versioned_docs/version-v0.5/manuals/podtransitionrule.md b/kuperator_versioned_docs/version-v0.5/manuals/podtransitionrule.md new file mode 100644 index 00000000..de0f7d2f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/manuals/podtransitionrule.md @@ -0,0 +1,220 @@ +--- +sidebar_position: 3 +--- + +# PodTransitionRule +In normal pod lifecycle, some phases are defined. For example, K8s Pods follow a defined lifecycle,starting in the `Pending` phase, moving through `Running` if at least one of its primary containers starts `OK`, and then through either the `Succeeded` or `Failed` phases depending on whether any container in the Pod terminated in failure. + +These phase definitions can fulfill basic Pod change scenarios, but it are ambiguous. 
+Actually, before pod upgrade or ready, it is necessary to have some check mechanisms in place to ensure the safety of pod changes. Fortunately, [PodOpsLifecycle](../concepts/podopslifecycle.md) extends and supports some check stages: `PreCheck` before pod upgrade and `PostCheck` before pod ready. + +To ensure a more fine-grained and controlled change process for Pods, we introduce custom rules or perform additional tasks as prerequisites for state transitions before the desired state of a Pod is achieved. Similar to the Pod `readinessGates`, where certain conditions must be met for a Pod to be considered readiness. For example, we consider a Pod ready for the `PostCheck` phase only if it has specific labels. For this purpose, we introduce the `PodTransitionRule` as a prerequisite for the state transition of a Pod. + +## Rule Definition + +You can use `PodTransitionRule` to define a set of transition rules for your workload pods. +Each rule will be executed at the corresponding stage, and it will be blocked if the conditions are not met. + +Here is an example: +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + name: podtransitionrule-sample +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + - stage: PreCheck # stages are supported by PodOpsLifecycle. Defaults to PreCheck. + labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + name: labelCheck + - stage: PostCheck + webhook: + clientConfig: + url: https://1.1.1.1:8089/post-stop + caBundle: Cg== + poll: + url: http://1.1.1.1:8089/fetch-result + rawQueryKey: task-id # URL parameter key to carry trace ID when fetching result. Defaults to task-id in form 'QueryUrl=URL?rawQueryKey=' + intervalSeconds: 5 + timeoutSeconds: 60 + failurePolicy: Fail + parameters: + - key: podIP + valueFrom: + fieldRef: + fieldPath: status.podIP + name: webhookCheck + selector: # select pods in effect + matchLabels: + app: foo +``` + + +### Available Policy +An `availablePolicy` rule defines the availability strategy during the Pod update process. + +#### maxUnavailable +```yaml +availablePolicy: + maxUnavailable: + value: 50% # int or string +``` + +`maxUnavailableValue` is the maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). +Absolute number is calculated from percentage by rounding down. +This can not be 0. + +#### minAvailable +```yaml +availablePolicy: + minAvailable: + value: 5 # int or string +``` +`minAvailableValue` is the minimum number of pods that should be available during the update. + +### Label Check + +A `labelCheck` rule is used to check if labels are satisfied. +You can define your own labels as change check conditions and modify the labels according to your needs. +```yaml +labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + matchExpressions: + - key: app.custom/forbidden + operator: DoesNotExist +``` + +### Webhook +A `webhook` is an HTTP callback, based on which a external web application can determine whether a pod can pass this check. + +* An HTTP POST occurs first when pods entries the configured stage which defaults PreCheck. +* If `poll` is provided, this rule then keeps calling polling url to fetch a long running job result. This job can be located by `task-id` returned from the response of the first request. 
+ + +```yaml +webhook: + clientConfig: # custom server config + url: https://1.1.1.1:8089/post-stop + caBundle: Cg== + poll: + url: http://1.1.1.1:8089/fetch-result + rawQueryKey: task-id + intervalSeconds: 5 + timeoutSeconds: 60 + failurePolicy: Fail + parameters: + - key: podIP + valueFrom: + fieldRef: + fieldPath: status.podIP +``` +**Protocol without poll** + +Request: +```json +// URL: https://1.1.1.1:8089/post-stop +// Method: POST + +{ + "traceId": "", // is generated by Kuperator, which can be used to track request + "stage": "PreTrafficOff", + "ruleName": "webhookCheck", + "resources": [ // Information of Pods which are in this stage + { + "apiVersion": "v1", + "kind": "Pod", + "name": "pod-a", + "parameters": { + "podIP": "1.0.0.1" // Customized information users can indicate from rule paramter + } + }, + { + "apiVersion": "v1", + "kind": "Pod", + "name": "pod-b", + "parameters": { + "podIP": "1.0.0.2" + } + } + ] +} +``` +Response: +```json +{ + "success": false, + "message": "msg", + "finishedNames": ["pod-a", "pod-b"] +} +``` +Response `success` indicating all pods approved or not. If it's `false`, the `finishedNames` field can be used to approve partial pods. + +**Protocol with poll** + +Request: +```json +// URL: https://1.1.1.1:8089/post-stop +// Method: POST + +{ + "traceId": "", // is generated by Kuperator, which can be used to track request + "stage": "PreTrafficOff", + "ruleName": "webhookCheck", + "resources": [ // Information of Pods which are in this stage + { + "apiVersion": "v1", + "kind": "Pod", + "name": "pod-a", + "parameters": { + "podIP": "1.0.0.1" // Customized information users can indicate from rule paramter + } + }, + { + "apiVersion": "v1", + "kind": "Pod", + "name": "pod-b", + "parameters": { + "podIP": "1.0.0.2" + } + } + ] +} +``` + +Response: + +```json +{ + "success": true, + "poll": true, // required to indicate polling calls is necessary + "taskId": , // required to to fetch polling result + "message": "msg" +} +``` +Response `success` indicating whether the first request is success or not. If true and field `poll` in response is `true` (or field `async` in response is `true`), PodTransisionRule will then begin to keep calling poll URL to fetch process result. +Field `taskId` is required for polling. + +The request for polling is GET method and in form of `QueryUrl=URL?task-id=`. The parameter key in this URL defaults `task-id`, if using `poll` in above response. It would be `trace-id` if using `async` in above response. +Users can also indicate the key by field `poll.rawQueryKey`. + +The response from polling call is expected like following: + +```json +{ + "success": true, + "message": "msg", + "finished": false, + "finishedNames": ["pod-a", "pod-b"] +} +``` + +`success` is supposed to be true, if there is no error. If all pods is approved, `finished` should be `true`. +If `finished` is `false`, `finishedNames` can be used to allow partial pods to be approved. diff --git a/kuperator_versioned_docs/version-v0.5/manuals/resourceconsist.md b/kuperator_versioned_docs/version-v0.5/manuals/resourceconsist.md new file mode 100644 index 00000000..19aa12f6 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/manuals/resourceconsist.md @@ -0,0 +1,437 @@ +--- +sidebar_position: 2 +--- + +# ResourceConsist +[**ResourceConsist**](https://github.com/KusionStack/resourceconsist/blob/main/README.md) aims to make a customized controller can be realized easily, and offering the ability of following +**PodOpsLifecycle** for controllers. 
+
+## Tutorials
+**kusionstack.io/resourceconsist** mainly consists of frame, experimental/adapters and adapters.
+
+The frame, ```kusionstack.io/resourceconsist/pkg/frame```, is used by adapters to start a controller, and it handles
+the Reconcile loop as well as the Employer/Employees' spec and status. If you write an adapter in your own repo, you can import
+```kusionstack.io/resourceconsist/pkg/frame/controller``` and ```kusionstack.io/resourceconsist/pkg/frame/webhook```,
+and call AddToMgr to start a controller.
+
+>A webhookAdapter only needs to be implemented for controllers following PodOpsLifecycle.
+
+```go
+package main
+
+import (
+	controllerframe "kusionstack.io/resourceconsist/pkg/frame/controller"
+	webhookframe "kusionstack.io/resourceconsist/pkg/frame/webhook"
+)
+
+func main() {
+	// manager is your controller-runtime manager; yourOwnControllerAdapter and
+	// yourOwnWebhookAdapter are the adapter implementations you provide.
+	controllerframe.AddToMgr(manager, yourOwnControllerAdapter)
+	webhookframe.AddToMgr(manager, yourOwnWebhookAdapter)
+}
+```
+### adapters
+The adapters package, ```kusionstack.io/resourceconsist/pkg/adapters```, contains the built-in adapters. You can start a
+controller with built-in adapters simply by calling AddBuiltinControllerAdaptersToMgr and AddBuiltinWebhookAdaptersToMgr,
+passing the built-in adapters' names. Currently, an alibabacloudslb adapter has been released. You can use it as follows:
+```go
+import (
+	"kusionstack.io/resourceconsist/pkg/adapters"
+)
+
+func main() {
+	adapters.AddBuiltinControllerAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+	adapters.AddBuiltinWebhookAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+}
+```
+Built-in adapters can also be used in the same way as the frame: call NewAdapter from a specific built-in adapter package
+and then call frame.AddToMgr to start a controller/webhook.
+
+More built-in adapters will be implemented in the future. To keep this repo stable, all new built-in adapters will
+be added to ```kusionstack.io/resourceconsist/pkg/experimental/adapters``` first, and then moved to
+```kusionstack.io/resourceconsist/pkg/adapters``` once they are ready to be released.
+#### alibabacloudslb adapter
+```pkg/adapters/alibabacloudslb``` is an adapter that implements ReconcileAdapter. It follows **PodOpsLifecycle** to
+handle various scenarios during pod operations, such as creating a new pod, deleting an existing pod, or handling
+changes to pod configurations. This adapter ensures minimal traffic loss and provides a seamless experience for users
+accessing services load balanced by Alibaba Cloud SLB.
+
+In ```pkg/adapters/alibabacloudslb```, the real server is removed from SLB before pod operation in ACK. The LB
+management and real server management are handled by CCM in ACK. Since the alibabacloudslb adapter follows PodOpsLifecycle
+and real servers are managed by CCM, ReconcileLifecycleOptions should be implemented. If the cluster is not in ACK or
+CCM is not working in the cluster, the alibabacloudslb controller should implement additional methods of ReconcileAdapter.
+### experimental/adapters
+The experimental/adapters package is a pre-release staging area for built-in adapters. It is used in the same way as the
+built-in adapters, but be aware: **DO NOT USE EXPERIMENTAL/ADAPTERS IN PRODUCTION**.
+### demo adapter
+A demo is implemented in ```resource_controller_suite_test.go```. 
In the demo controller, the employer is represented +as a service and is expected to have the following **DemoServiceStatus**: +``` +DemoServiceStatus{ + EmployerId: employer.GetName(), + EmployerStatuses: DemoServiceDetails{ + RemoteVIP: "demo-remote-VIP", + RemoteVIPQPS: 100, + } +} +``` +The employee is represented as a pod and is expected to have the following **DemoPodStatus**: +``` +DemoPodStatus{ + EmployeeId: pod.Name, + EmployeeName: pod.Name, + EmployeeStatuses: PodEmployeeStatuses{ + Ip: string, + Ipv6: string, + LifecycleReady: bool, + ExtraStatus: PodExtraStatus{ + TrafficOn: bool, + TrafficWeight: int, + }, + } +} +``` +The DemoResourceProviderClient is a fake client that handles backend provider resources related to the employer/employee +(service/pods). In the Demo Controller, ```demoResourceVipStatusInProvider``` and ```demoResourceRsStatusInProvider``` +are mocked as resources in the backend provider. + +How the demo controller adapter realized will be introduced in detail as follows, +```DemoControllerAdapter``` was defined, including a kubernetes client and a resourceProviderClient. What included in +the Adapter struct can be defined as needed. +```go +type DemoControllerAdapter struct { + client.Client + resourceProviderClient *DemoResourceProviderClient +} +``` +Declaring that the DemoControllerAdapter implemented ```ReconcileAdapter``` and ```ReconcileLifecycleOptions```. +Implementing ```RconcileAdapter``` is a must action, while ```ReconcileLifecycleOptions``` isn't, check the remarks +for ```ReconcileLifecycleOptions``` in ```kusionstack.io/resourceconsist/pkg/frame/controller/types.go``` to find why. +```go +var _ ReconcileAdapter = &DemoControllerAdapter{} +var _ ReconcileLifecycleOptions = &DemoControllerAdapter{} +``` +Following two methods for DemoControllerAdapter inplementing ```ReconcileLifecycleOptions```, defines whether +DemoControllerAdapter following PodOpsLifecycle and need record employees. +```go +func (r *DemoControllerAdapter) FollowPodOpsLifeCycle() bool { + return true +} + +func (r *DemoControllerAdapter) NeedRecordEmployees() bool { + return needRecordEmployees +} +``` +```IEmployer``` and ```IEmployee``` are interfaces that includes several methods indicating the status employer and +employee. +```go +type IEmployer interface { + GetEmployerId() string + GetEmployerStatuses() interface{} + EmployerEqual(employer IEmployer) (bool, error) +} + +type IEmployee interface { + GetEmployeeId() string + GetEmployeeName() string + GetEmployeeStatuses() interface{} + EmployeeEqual(employee IEmployee) (bool, error) +} + +type DemoServiceStatus struct { + EmployerId string + EmployerStatuses DemoServiceDetails +} + +type DemoServiceDetails struct { + RemoteVIP string + RemoteVIPQPS int +} + +type DemoPodStatus struct { + EmployeeId string + EmployeeName string + EmployeeStatuses PodEmployeeStatuses +} +``` +```GetSelectedEmployeeNames``` returns all employees' names selected by employer, here is pods' names selected by +service. ```GetSelectedEmployeeNames``` is used for ensuring LifecycleFinalizer and ExpectedFinalizer, so you can give +it an empty return if your adapter doesn't follow PodOpsLifecycle. 
+```go +func (r *DemoControllerAdapter) GetSelectedEmployeeNames(ctx context.Context, employer client.Object) ([]string, error) { + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + selected := make([]string, len(podList.Items)) + for idx, pod := range podList.Items { + selected[idx] = pod.Name + } + + return selected, nil +} +``` +```GetExpectedEmployer``` and ```GetCurrentEmployer``` defines what is expected under the spec of employer and what is +current status, like the load balancer from a cloud provider. Here in the demo adapter, expected is defined by hardcode +and current is retrieved from a fake resource provider ```demoResourceVipStatusInProvider```. +```go +func (r *DemoControllerAdapter) GetExpectedEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return nil, nil + } + var expect []IEmployer + expect = append(expect, DemoServiceStatus{ + EmployerId: employer.GetName(), + EmployerStatuses: DemoServiceDetails{ + RemoteVIP: "demo-remote-VIP", + RemoteVIPQPS: 100, + }, + }) + return expect, nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployer(ctx context.Context, employer client.Object) ([]IEmployer, error) { + var current []IEmployer + + req := &DemoResourceVipOps{} + resp, err := r.resourceProviderClient.QueryVip(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource vip query resp is nil") + } + + for _, employerStatus := range resp.VipStatuses { + current = append(current, employerStatus) + } + return current, nil +} +``` +```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles creation/update/deletion of resources related to employer on +related backend provider. Here in the demo adapter, ```CreateEmployer/UpdateEmployer/DeleteEmployer``` handles +```demoResourceVipStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) CreateEmployer(ctx context.Context, employer client.Object, toCreates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + + toCreateDemoServiceStatus := make([]DemoServiceStatus, len(toCreates)) + for idx, create := range toCreates { + createDemoServiceStatus, ok := create.(DemoServiceStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreates employer is not DemoServiceStatus") + } + toCreateDemoServiceStatus[idx] = createDemoServiceStatus + } + + _, err := r.resourceProviderClient.CreateVip(&DemoResourceVipOps{ + VipStatuses: toCreateDemoServiceStatus, + }) + if err != nil { + return nil, toCreates, err + } + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployer(ctx context.Context, employer client.Object, toUpdates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoServiceStatus := make([]DemoServiceStatus, len(toUpdates)) + for idx, update := range toUpdates { + updateDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdates employer is not DemoServiceStatus") + } + toUpdateDemoServiceStatus[idx] = updateDemoServiceStatus + } + + _, err := r.resourceProviderClient.UpdateVip(&DemoResourceVipOps{ + VipStatuses: toUpdateDemoServiceStatus, + }) + if err != nil { + return nil, toUpdates, err + } + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployer(ctx context.Context, employer client.Object, toDeletes []IEmployer) ([]IEmployer, []IEmployer, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoServiceStatus := make([]DemoServiceStatus, len(toDeletes)) + for idx, update := range toDeletes { + deleteDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDeletes employer is not DemoServiceStatus") + } + toDeleteDemoServiceStatus[idx] = deleteDemoServiceStatus + } + + _, err := r.resourceProviderClient.DeleteVip(&DemoResourceVipOps{ + VipStatuses: toDeleteDemoServiceStatus, + }) + if err != nil { + return nil, toDeletes, err + } + return toDeletes, nil, nil +} +``` +```GetExpectedEmployee```and```GetCurrentEmployee``` defines what is expected under the spec of employer and employees +and what is current status, like real servers under the load balancer from a cloud provider. Here in the demo adapter, +expected is calculated from pods and current is retrieved from a fake resource provider ```demoResourceRsStatusInProvider```. 
+```go +// GetExpectEmployeeStatus return expect employee status +func (r *DemoControllerAdapter) GetExpectedEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return []IEmployee{}, nil + } + + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + expected := make([]IEmployee, len(podList.Items)) + expectIdx := 0 + for _, pod := range podList.Items { + if !pod.DeletionTimestamp.IsZero() { + continue + } + status := DemoPodStatus{ + EmployeeId: pod.Name, + EmployeeName: pod.Name, + } + employeeStatuses, err := GetCommonPodEmployeeStatus(&pod) + if err != nil { + return nil, err + } + extraStatus := PodExtraStatus{} + if employeeStatuses.LifecycleReady { + extraStatus.TrafficOn = true + extraStatus.TrafficWeight = 100 + } else { + extraStatus.TrafficOn = false + extraStatus.TrafficWeight = 0 + } + employeeStatuses.ExtraStatus = extraStatus + status.EmployeeStatuses = employeeStatuses + expected[expectIdx] = status + expectIdx++ + } + + return expected[:expectIdx], nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + var current []IEmployee + req := &DemoResourceRsOps{} + resp, err := r.resourceProviderClient.QueryRealServer(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource rs query resp is nil") + } + + for _, rsStatus := range resp.RsStatuses { + current = append(current, rsStatus) + } + return current, nil +} +``` +```CreateEmployees/UpdateEmployees/DeleteEmployees``` handles creation/update/deletion of resources related to employee +on related backend provider. Here in the demo adapter, ```CreateEmployees/UpdateEmployees/DeleteEmployees``` +handles ```demoResourceRsStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) CreateEmployees(ctx context.Context, employer client.Object, toCreates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + toCreateDemoPodStatuses := make([]DemoPodStatus, len(toCreates)) + + for idx, toCreate := range toCreates { + podStatus, ok := toCreate.(DemoPodStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreate is not DemoPodStatus") + } + toCreateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.CreateRealServer(&DemoResourceRsOps{ + RsStatuses: toCreateDemoPodStatuses, + }) + if err != nil { + return nil, toCreates, err + } + + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployees(ctx context.Context, employer client.Object, toUpdates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoPodStatuses := make([]DemoPodStatus, len(toUpdates)) + + for idx, toUpdate := range toUpdates { + podStatus, ok := toUpdate.(DemoPodStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdate is not DemoPodStatus") + } + toUpdateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.UpdateRealServer(&DemoResourceRsOps{ + RsStatuses: toUpdateDemoPodStatuses, + }) + if err != nil { + return nil, toUpdates, err + } + + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployees(ctx context.Context, employer client.Object, toDeletes []IEmployee) ([]IEmployee, []IEmployee, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoPodStatuses := make([]DemoPodStatus, len(toDeletes)) + + for idx, toDelete := range toDeletes { + podStatus, ok := toDelete.(DemoPodStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDelete is not DemoPodStatus") + } + toDeleteDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.DeleteRealServer(&DemoResourceRsOps{ + RsStatuses: toDeleteDemoPodStatuses, + }) + if err != nil { + return nil, toDeletes, err + } + + return toDeletes, nil, nil +} +``` diff --git a/kuperator_versioned_docs/version-v0.5/started/_category_.json b/kuperator_versioned_docs/version-v0.5/started/_category_.json new file mode 100644 index 00000000..877a378f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 2 +} diff --git a/kuperator_versioned_docs/version-v0.5/started/demo-graceful-operation.md b/kuperator_versioned_docs/version-v0.5/started/demo-graceful-operation.md new file mode 100644 index 00000000..6eb1fce9 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/started/demo-graceful-operation.md @@ -0,0 +1,340 @@ +# Using KusionStack Kuperator to operate Pods gracefully + +Applications always provide its service along with traffic routing. +On Kubernetes, they should be a set of Pods and a corresponding Kubernetes Service resource to expose the service. + +However, during operations such as updating Pod revisions, +there is a risk that client request traffic may be lost. This can lead to a poor user experience for developers. + +This tutorial will demonstrate how to operate Pods gracefully in a KusionStack Kuperator way on Aliyun ACK +with SLB as a Service backend provider. 
+ +> You can also get the same point from [this video](https://www.bilibili.com/video/BV1n8411q7sP/?t=15.7), +> which shows the same case using both KusionStack Kusion and Kuperator. +> The sample used in this video can be found from [KusionStack Catalog](https://github.com/KusionStack/catalog/tree/main/models/samples/wordpress). + +## Preparing + +First, ensure that you have an Aliyun ACK Kubernetes cluster set up in order to provision an Aliyun SLB. + +Next, install KusionStack Kuperator on this Kubernetes cluster +following [installation doc](https://kusionstack.io/docs/kuperator/started/install). + +## Get started + +### Create a new namespace + +To begin, create a new namespace for this tutorial: + +```shell +$ kubectl create ns kuperator-tutorial +``` + +### Provision Pods and Services + +You can create a set of Pods to run up a demo application service +by creating CollaSet resource using following command: + +``` shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.3 + name: server + command: + - /server + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +There should be 3 Pods created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +server-c5lsr 1/1 Running 0 2m23s +server-p6wrx 1/1 Running 0 2m23s +server-zn62c 1/1 Running 0 2m23s +``` + +Then create a Kubernetes Service by running following command, +which will provision Aliyun SLB to expose service. + +```shell +echo ' +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small + service.beta.kubernetes.io/backend-type: eni + labels: + kusionstack.io/control: "true" # this label is required + name: server +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: server + type: LoadBalancer +' | kubectl -n kuperator-tutorial apply -f - +``` + +A service with external IP should be provisioned. + +```shell +$ kubectl -n kuperator-tutorial get svc server +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +server LoadBalancer 192.168.225.55 47.101.49.182 80:30146/TCP 51s +``` + +The label `kusionstack.io/control: "true"` on Service is very important. +It means this service resource will be recognized by ResourceConsist framework, and then participate in PodOpsLifecycle +to control the Aliyun SLB to switch off traffic before updating each Pod and switch on traffic after it finished, +in order to protect the service. + +### Provision a client + +Then we will provision a client to access the service we created before. +Please replace `` in the following CollaSet yaml with the external IP from Kubernetes Service created above, and apply again. 
+ +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: client +spec: + replicas: 1 + selector: + matchLabels: + app: client + template: + metadata: + labels: + app: client + spec: + containers: + - image: wu8685/echo:1.3 + name: nginx + command: + - /client + args: + - -url + - http:///echo # EXTERNAL_IP should be replaced + - -m + - POST + - d + - kuperator-tutorial + - -qps + - "10" + - -worker + - "10" + - -timeout + - "10000" + resources: + limits: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi +' | kubectl -n kuperator-tutorial apply -f - +``` + +A client Pod should be created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-nc426 1/1 Running 0 30s +server-c5lsr 1/1 Running 0 19m +server-p6wrx 1/1 Running 0 19m +server-zn62c 1/1 Running 0 19m +``` + +This client will continuously access the service using the configuration provided in the command. +You can monitor the response codes from its logs: + +```shell +kubectl -n kuperator-tutorial logs -f client-nc426 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +``` + +The accesses are all successful. + +### Update Pod revision + +To trigger a Pod revision update, run the following command +to edit the container image and command in the PodTemplate of CollaSet: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.2 + name: server + command: + - /app/echo + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +It will trigger all Pods updated simultaneously. So the application `server` has no Pod to serve. +We can observe the error from client logs. 
+ +```shell +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:54040->47.101.49.182:80: read: connection reset by peer +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:34438->47.101.49.182:80: read: connection reset by peer +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-1 another loop, request: 20, failed: 3 +worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +worker-0 another loop, request: 20, failed: 3 +worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers) +``` + +### Provision PodTransistionRule + +To avoid this problem, provision a PodTransitionRule with a maxUnavailable 50% rule by running the following command: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + labels: + name: server +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + selector: + matchLabels: + app: server +' | kubectl -n kuperator-tutorial apply -f - +``` + +After updating the CollaSet of the server to trigger an update, you will see the Pods rolling update one by one, +ensuring that at least one Pod is always available to serve. + +```shell +kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-rrfbj 1/1 Running 0 25s +server-457sn 0/1 Running 0 5s +server-bd5sz 0/1 Running 0 5s +server-l842s 1/1 Running 0 2m4s +``` + +You can see from the client logs that no access requests fail during this update. + +```shell +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +``` + +### Clean tutorial namespace + +At the end of this tutorial, you can clean up the resources by deleting the namespace: + +```shell +$ kubectl delete ns kuperator-tutorial +``` + +## Comparison with the Native Approach + +Kubernetes provides `preStop` and `postStart` hook in each container, by which users can also interact with service outside +Kubernetes like Aliyun SLB service. However, KusionStack Kuperator offers several advantages: + +* Pod level vs Container level + +Kuperator offers a Pod level hooks which have more complete information than one container, +especially there are several containers in one Pod. + +* Plugin-able + +Through KusionStack Kuperator, you can decouple operations executed before or after Pods actually change. 
+For example, traffic control can be added or removed without modifying the Pod's preStop configuration. + +* Rollback option + +In case of issues, rollback becomes a viable option when using the Kuperator approach to update Pods. +Since Kuperator does not modify the Pods or their containers during the update, +if the traffic service experiences problems, there is an opportunity to cancel the update. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.5/started/install.md b/kuperator_versioned_docs/version-v0.5/started/install.md new file mode 100644 index 00000000..1ac6a89c --- /dev/null +++ b/kuperator_versioned_docs/version-v0.5/started/install.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 2 +--- + +# Installation + +## Install with helm +KusionStack Kuperator requires **Kubernetes version >= 1.18** +```shell +# Firstly add charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install kuperator kusionstack/kuperator +``` + + +[Helm](https://github.com/helm/helm) is a tool for managing packages of pre-configured Kubernetes resources. +### Optional: chart parameters + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|-------------|----------------|----------------| +| `namespace` | namespace for Kuperator installation | `kusionstack-system` | +| `namespaceEnabled` | Whether to create the installation.namespace | `true` | +| `managerReplicas`| Replicas of Kuperator deployment | `3` | +| `image.repo` | Repository for kuperator image | `kusionstack/kuperator`| +| `image.pullPolicy`| Image pull policy for kuperator-manager container | `IfNotPresent` | +| `image.tag` | Tag for kuperator-manager image | `v0.1.0` | +| `resources.limits.cpu` | CPU resource limit of kuperator-manager container | `500m` | +| `resources.limits.memory` | Memory resource limit of kuperator-manager container | `128Mi` | +| `resources.requests.cpu` | CPU resource request of kuperator-manager container | `10m` | +| `resources.requests.memory` | Memory resource request of kuperator-manager container | `64Mi` | + +### Upgrade + +Run following command to upgrade KusionStack Kuperator to the latest version. + +```shell +# Upgrade to the latest version +$ helm upgrade kuperator kusionstack/kuperator +``` + +### Uninstall + +Run following command to uninstall KusionStack Kuperator. + +```shell +# Uninstall +$ helm uninstall kuperator +``` \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.6/concepts/_category_.json b/kuperator_versioned_docs/version-v0.6/concepts/_category_.json new file mode 100644 index 00000000..1d3167d4 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/concepts/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Concepts", + "position": 3 +} diff --git a/kuperator_versioned_docs/version-v0.6/concepts/podopslifecycle.md b/kuperator_versioned_docs/version-v0.6/concepts/podopslifecycle.md new file mode 100644 index 00000000..88068133 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/concepts/podopslifecycle.md @@ -0,0 +1,232 @@ +--- +sidebar_position: 2 +--- + +# PodOpsLifecycle + +## Background + +Kubernetes provides a set of default controllers for workload management, such as StatefulSet, Deployment, and DaemonSet, which are responsible for Pod operations. 
+Meanwhile, application users may also have some services outside the Kubernetes cluster that are closely related to the Pod Lifecycle, including traffic routing, service discovery, or alert monitoring. +However, they face challenges in participating in the operational lifecycle of a Pod, even if they are connected to Kubernetes by developing a controller that watches the Pods. + +PodOpsLifecycle aims to offer Kubernetes administrators and developers finer-grained control over the entire lifecycle of a Pod. +It enables developers to execute necessary actions before, during, and after specific phases of a Pod operation. +For instance, removing the Pod's IP from the traffic route before initiating the Pod operation, performing the actual Pod operations, and adding it back after the Pod operation is completed to achieve a smooth and graceful Pod operation, and prevent any traffic loss. + +## Introduction + +In PodOpsLifecycle, participants are classified into two roles: `operation controllers` and `cooperation controllers`. +- **Operation controllers** are responsible for operating Pods, such as Deployments and StatefulSets from Kubernetes, and CollaSets from Kuperator which intend to scale, update, or recreate Pods. +- **Cooperation controllers** are sensitive with Pod status. They handle resources or configurations around Pods, which may include traffic controller, alert monitoring controller, etc. These controllers typically reconcile Kubernetes resources around Pods with external services, such as sync Pod IPs with the LB provider, or maintaining Pods' metadata with application monitoring system. + +The two types of controllers do not need to be aware of each other. All controllers are organized by PodOpsLifecycle. Additionally, KusionStack Kuperator introduces extra phases around the native Kubernetes Pod Lifecycle: ServiceAvailable, Preparing, and Completing. + +![pod-ops-lifecycle](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png) + +- **Completing**: After a Pod is created or updated and becomes ready, Kuperator marks its PodOpsLifecycle as the `Completing` phase. During this phase, the Pod is in a ready condition, prompting cooperation controllers to perform actions such as registering the Pod IP in the traffic route. Once all cooperation controllers complete their tasks, Kuperator sets the PodOpsLifecycle to the `ServiceAvailable` phase. +- **ServiceAvailable**: This phase indicates that the Pod is in a normal state and ready to serve. If everything goes smoothly, the Pod remains in the `ServiceAvailable` phase until the next operation. +- **Preparing**: When an operation controller needs to operate the Pod, it triggers a new PodOpsLifecycle. The Pod then transitions from the `ServiceAvailable` phase to the `Preparing` phase. During this phase, the Pod is initially marked as Unready by setting ReadinessGate to false. All cooperation controllers then begin preparing tasks, such as removing the Pod's IP from the traffic route. After completing these tasks, the Pod enters the `Operating` phase. +- **Operating**: If a Pod enters the `Operating` phase, it is expected to accept any kind of operation without any damage, including recreation, scaling-in, upgrading, etc. Operation controllers are permitted to apply any changes to this Pod. Once all these operations are completed, the Pod advances to the next phase — `Completing`, and the PodOpsLifecycle continues. 
+ +The PodOpsLifecycle detail and the relationship with Kubernetes native Pod Lifecycle is showed by following sequence diagram. + +![pod-ops-lifecycle-sequence-diagram](/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png) + +## Developer's Guide + +This section introduces how to develop operation controllers and cooperation controllers to interact with PodOpsLifecycle. +- The operation controller is responsible for a set of Pod operation tasks. KusionStack Kuperator has already provided various types of operation controllers. Users only need to develop a new operation controller if a new kind of Pod operation needs to be added. +- The cooperation controller participates in PodOpsLifecycle before and after operating on a Pod, such as the Traffic controller, alert monitoring controller, and other controllers responsible for maintaining the Pod and application status. Users should develop a new cooperation controller only when there is a new type of service or status around the Pod that needs to be maintained, such as integrating with a new traffic provider. + +### Operation Controller + +The operation controller is responsible for Pod operations. The tasks that an operation controller needs to perform during PodOpsLifecycle include triggering a PodOpsLifecycle, checking whether the Pod has entered the Operating phase, performing Pod operations, and marking Pod operations as finished. These actions interacting with PodOpsLifecycle are provided in the package `kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle/utils.go`. + +A simple operation controller reconcile method would look like this: + +```go +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/client" + + "kusionstack.io/kuperator/pkg/controllers/utils/podopslifecycle" +) + +var operationAdapter = &OperationOpsLifecycleAdapter{} + +type OperationOpsLifecycleAdapter struct { +} + +// GetID indicates ID of the PodOpsLifecycle +func (a *OperationOpsLifecycleAdapter) GetID() string { + return "new-id" +} + +// GetType indicates type for this Operation Controller +func (a *OperationOpsLifecycleAdapter) GetType() podopslifecycle.OperationType { + return "new-type" +} + +// AllowMultiType indicates whether multiple IDs which have the same Type are allowed +func (a *OperationOpsLifecycleAdapter) AllowMultiType() bool { + return true +} + +// WhenBegin is a hook, which will be executed when begin a lifecycle +func (a *OperationOpsLifecycleAdapter) WhenBegin(pod client.Object) (bool, error) { + return false, nil +} + +// WhenFinish is a hook, which will be executed when finish a lifecycle +func (a *OperationOpsLifecycleAdapter) WhenFinish(pod client.Object) (bool, error) { + return false, nil +} + +...... 
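+
+// The Reconcile method below sketches how an operation controller drives PodOpsLifecycle:
+// it triggers Begin, waits until AllowOps reports the Operating phase, performs the actual
+// Pod operation (needOperation and doPodOperation are placeholders for your own logic),
+// and finally calls Finish so the lifecycle can continue.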
+func (r *PodOperationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+	// get the Pod
+	pod := &corev1.Pod{}
+	if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+		if !errors.IsNotFound(err) {
+			return reconcile.Result{}, err
+		}
+		return reconcile.Result{}, nil
+	}
+
+	// check if the Pod needs operation
+	if !r.needOperation(pod) {
+		return reconcile.Result{}, nil
+	}
+
+	// if PodOpsLifecycle has not been triggered, trigger it
+	if !podopslifecycle.IsDuringOps(operationAdapter, pod) {
+		if updated, err := podopslifecycle.Begin(r, operationAdapter, pod); err != nil {
+			return reconcile.Result{}, err
+		} else if updated {
+			return reconcile.Result{}, nil
+		}
+	}
+
+	// wait until the Pod enters the Operating phase
+	if _, allowed := podopslifecycle.AllowOps(operationAdapter, 0, pod); !allowed {
+		return reconcile.Result{}, nil
+	}
+
+	// do the operation work
+	if completed := r.doPodOperation(pod); !completed {
+		return reconcile.Result{}, nil
+	}
+
+	// after the operation work is completed, finish the Operating phase to continue PodOpsLifecycle
+	if _, err := podopslifecycle.Finish(r, operationAdapter, pod); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{}, nil
+}
+```
+
+### Pod Cooperation Controller
+
+There are two ways to develop a cooperation controller.
+One way is to develop a controller with controller-runtime, adhering to some conventions of PodOpsLifecycle and Kubernetes.
+Another way is to use the [ResourceConsist](https://github.com/KusionStack/resourceconsist) framework provided by KusionStack; see its [documentation](https://www.kusionstack.io/docs/kuperator/manuals/resourceconsist).
+
+The following outlines the first approach.
+
+```go
+import (
+	"context"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	k8spod "k8s.io/kubernetes/pkg/api/v1/pod"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	appsv1alpha1 "kusionstack.io/kuperator/apis/apps/v1alpha1"
+)
+
+const (
+	// Finalizer needs to have prefix: `prot.podopslifecycle.kusionstack.io`.
+	// KusionStack Kuperator keeps this prefix backward-compatible,
+	// so it can be hard-coded to decouple from KusionStack Kuperator.
+	finalizerPrefix = appsv1alpha1.PodOperationProtectionFinalizerPrefix
+
+	protectionFinalizer = finalizerPrefix + "/" + "unique-id"
+)
+
+......
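+
+// The Reconcile method below sketches a cooperation controller: it keeps an external
+// resource (here, a traffic route via the placeholder trafficOn/trafficOff helpers)
+// consistent with the Pod's readiness, and adds or removes the protection finalizer so
+// that PodOpsLifecycle waits for this controller before and after each operation.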
+func (r *PodResourceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+	// get the Pod
+	pod := &corev1.Pod{}
+	if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
+		if !errors.IsNotFound(err) {
+			return reconcile.Result{}, err
+		}
+		return reconcile.Result{}, nil
+	}
+
+	if k8spod.IsPodReady(pod) {
+		// do resource reconciliation, e.g. add the Pod IP to the traffic route
+		r.trafficOn(pod.Status.PodIP)
+		// It is important to add a unique finalizer on this Pod
+		return reconcile.Result{}, r.addFinalizer(ctx, pod, protectionFinalizer)
+	}
+
+	// the Pod is not ready: do resource reconciliation, e.g. remove the Pod IP from the traffic route
+	r.trafficOff(pod.Status.PodIP)
+	// It is important to remove the unique finalizer from this Pod
+	return reconcile.Result{}, r.removeFinalizer(ctx, pod, protectionFinalizer)
+}
+
+func (r *PodResourceReconciler) addFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+	if controllerutil.ContainsFinalizer(pod, finalizer) {
+		return nil
+	}
+
+	controllerutil.AddFinalizer(pod, finalizer)
+	return r.Update(ctx, pod)
+}
+
+func (r *PodResourceReconciler) removeFinalizer(ctx context.Context, pod *corev1.Pod, finalizer string) error {
+	if !controllerutil.ContainsFinalizer(pod, finalizer) {
+		return nil
+	}
+
+	controllerutil.RemoveFinalizer(pod, finalizer)
+	return r.Update(ctx, pod)
+}
+```
+
+## Key Features
+
+### Concurrency Support
+
+PodOpsLifecycle in KusionStack Kuperator supports concurrency.
+This means PodOpsLifecycle is able to organize and track multiple controllers operating on the same pod at the same time.
+For example, while one controller is updating a Pod, other controllers are allowed to perform other operations on it at the same time, such as deleting, restarting, or recreating it,
+although the combined result may not be meaningful.
+
+### General Workload Support
+
+PodOpsLifecycle offers seamless integration with various workload types, including Deployment and StatefulSet.
+To enable this functionality, ensure the feature gate for `GraceDeleteWebhook` is enabled when starting the KusionStack Kuperator controller:
+
+```shell
+# Enable the GraceDeleteWebhook feature when starting the controller with this argument
+$ /manager --feature-gates=GraceDeleteWebhook=true
+```
+
+Once enabled, any Pod labeled with `kusionstack.io/control=true` under a general workload, such as Deployment, becomes manageable by PodOpsLifecycle.
+This feature gives such workloads a way to work more closely with Pod Cooperation Controllers.
+
+> Due to the Kubernetes webhook mechanism, the following error will be returned when workloads or users delete a pod. This error is intentional and serves to indicate that the pod deletion process has started and is being managed by PodOpsLifecycle. 
+> ```shell
+> $ kubectl -n default delete pod collaset-sample-74fsv
+> Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle
+> ```
\ No newline at end of file
diff --git a/kuperator_versioned_docs/version-v0.6/introduction/_category_.json b/kuperator_versioned_docs/version-v0.6/introduction/_category_.json
new file mode 100644
index 00000000..537bad9b
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.6/introduction/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Introduction",
+  "position": 0
+}
diff --git a/kuperator_versioned_docs/version-v0.6/introduction/introduction.md b/kuperator_versioned_docs/version-v0.6/introduction/introduction.md
new file mode 100644
index 00000000..5adf228c
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.6/introduction/introduction.md
@@ -0,0 +1,49 @@
+# What is KusionStack Kuperator?
+
+KusionStack Kuperator consists of workloads and operators built on Kubernetes Custom Resource Definitions,
+with a primary aim of bridging the gap between platform development and Kubernetes.
+
+By keeping more operational work inside the Kubernetes layer,
+KusionStack Kuperator reduces the complexity of interacting with Kubernetes
+and enhances convenience for platform developers.
+
+## Key features
+
+KusionStack Kuperator currently provides the following features,
+streamlining application operations when developing platforms based on Kubernetes:
+
+### Fine-grained operation
+
+KusionStack Kuperator introduces PodOpsLifecycle to extend the native Pod lifecycle with additional phases such as PreCheck, Preparing, etc.
+All operators within KusionStack Kuperator respect PodOpsLifecycle,
+so that PodOpsLifecycle is able to orchestrate all of these operators to operate each Pod in a coordinated way.
+
+### Advanced workloads
+
+KusionStack Kuperator offers several workloads to make it convenient and effective to deliver and operate application resources.
+
+Currently, Kuperator provides the CollaSet workload.
+Besides the basic ability to scale and update Pods like Kubernetes Deployment and StatefulSet,
+CollaSet also provides a range of scale and update strategies,
+such as in-place update of container images and pod revision consistency.
+
+### Streamlined Pod Operation
+
+KusionStack Kuperator introduces the ResourceConsist framework, which offers a graceful way
+to integrate resource management around Pods, including traffic control, into the PodOpsLifecycle.
+This simplifies the work for platform developers dealing with Pod operation details.
+KusionStack also integrates some resources by default, such as Aliyun SLB.
+
+### Risk management
+
+Building upon the PodOpsLifecycle, KusionStack Kuperator introduces PodTransitionRule,
+which keeps the risks of Pod operations under control.
+By providing a MaxUnavailable rule similar to Kubernetes' PodDisruptionBudget (PDB),
+it ensures there are always enough Pods available for service.
+Furthermore, it allows for custom rules through webhook and label hook extensions.
+
+## Future works
+
+The KusionStack Kuperator project is currently in its early stages.
+Our goal is to simplify platform development. We will continue building in areas such as application operations,
+observability, and insight. 
We hope the Kuperator will make it easier for you to build platforms. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.6/manuals/_category_.json b/kuperator_versioned_docs/version-v0.6/manuals/_category_.json new file mode 100644 index 00000000..795f138a --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/manuals/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Manuals", + "position": 4 +} diff --git a/kuperator_versioned_docs/version-v0.6/manuals/collaset.md b/kuperator_versioned_docs/version-v0.6/manuals/collaset.md new file mode 100644 index 00000000..b5ebcfaf --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/manuals/collaset.md @@ -0,0 +1,942 @@ +--- +sidebar_position: 1 +--- + +# CollaSet +CollaSet is responsible for managing a set of Pods. Similar to Kubernetes Deployment and StatefulSet, it also supports scaling and updating Pods. Additionally, CollaSet offers advanced features to provide users with more granular control over managing Pods. + +A basic CollaSet configuration is represented in the following YAML format: + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 2 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` +Let's explore the features of CollaSet. + +## Basic Features +### Scaling Pods +CollaSet utilizes the field spec.replicas to indicate the number of Pods under management. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + replicas: 3 # indicate the number of Pods to manage + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +... +``` +Pods can be provisioned by CollaSet. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-85q7g 1/1 Running 0 57s +collaset-sample-vx5ws 1/1 Running 0 57s +collaset-sample-hr7pv 1/1 Running 0 57s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +By default, CollaSet always creates new Pods using the latest template specified in `spec.template`. CollaSet establishes ownership over a set of Pods through the label selector defined in `spec.selector`. Thus, it's important to ensure that the labels provided in `spec.selector` match those in `spec.template.metadata.labels`. + +CollaSet status provides general information about this CollaSet and all Pods under it. + +``` shell +$ kubectl -n default get cls collaset-sample -o yaml +...... +status: + availableReplicas: 3 + collisionCount: 0 + conditions: + - lastTransitionTime: "2023-09-01T03:56:09Z" + reason: Updated + status: "True" + type: Update + currentRevision: collaset-sample-6d7b7c58f + observedGeneration: 1 + operatingReplicas: 0 + readyReplicas: 3 + replicas: 3 + scheduledReplicas: 3 + updatedAvailableReplicas: 3 + updatedReadyReplicas: 3 + updatedReplicas: 3 + updatedRevision: collaset-sample-6d7b7c58f +``` + +Some fields in CollaSet status are explained here: + +`updatedRevision` indicates the latest revision that CollaSet uses to create or update Pods. 
+ +`currentRevision` indicates the last updated revision. It will be set to updatedRevision after all Pods are updated, and their PodReady conditions become True. + +`replicas` indicates the count of Pods under this CollaSet. + +`scheduledReplicas` indicates the count of Pods under this CollaSet that successfully got scheduled. + +`availableReplicas` indicates the count of Pods under this CollaSet that have all expected finalizers attached. + +`updatedReplicas` indicates the count of Pods under this CollaSet that have the updated revision. + +`updatedReadyReplicas` indicates the count of Pods under this CollaSet that are counted in `updatedReplicas` and have their PodReady conditions set to True. + +`updatedAvailableReplicas` indicates the count of Pods under this CollaSet that is counted in `updatedReadyReplicas` and have all expected finalizers attached. + +### Updating Pods +CollaSet generates Pods according to the pod template described in `spec.template`. This template can be updated to signal CollaSet to update each owned Pod: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: +...... +spec: +...... + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 +...... +``` + +CollaSet immediately updates all Pods it owns with the new Pod template by default. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +The update progress can be controlled using partition. + +#### Partition +Similar to StatefulSet, `partition` is used to control the upgrade progress. + +By default, if not indicated, all Pods will be updated when spec.template changes. The `partition` can be adjusted from 0 to `spec.replicas` to specify how many Pods CollaSet should update. **Unlike StatefulSet, the partition in CollaSet represents the number of Pods to update.** + +Let's update the image back to nginx:1.25.2: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.25.2 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # use partition to control upgrade progress +``` + +In this case, CollaSet only updates 1 Pod to the updated revision. + +``` shell +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.25.2 # only 1 Pod updated + - image: nginx:1.24.0 +``` + +#### Update by Label +By configuring the `byLabel` rolling update policy, users can precisely specify which Pods they want to update by using labels. + +If you go back to the sample in the [section Partition](#Partition) and change `byPartition` to `byLabel` like the following: + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + rollingUpdate: +- byPartition: +- partition: 1 ++ byLabel: {} +``` + +Subsequently, each Pod will only be updated if it's marked with the label `collaset.kusionstack.io/update-included`. + +## Advanced Features +### Pod Instance ID +Each Pod created by CollaSet has a unique ID held by the label `collaset.kusionstack.io/instance-id`, which can be used to identify each individual Pod. 
+ +``` yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + collaset.kusionstack.io/instance-id: "0" # Pod instance ID +... +``` + +CollaSet provides a context to specify an ID pool, which defaults to the same name as the CollaSet and is immutable. + +``` yaml +... +spec: + scaleStrategy: + context: +``` + +The same ID pool name can be indicated for multiple CollaSets, allowing them to share a single ID pool. Consequently, each Pod created by these CollaSets will be assigned a unique ID. + +For example, these are two CollaSets with the same context: + +``` shell +$ cat ~/sample.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-a +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +--- + +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample-b +spec: + replicas: 2 + scaleStrategy: + context: foo # with the same context foo + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: nginx +``` + +Then create these CollaSets with the sample file: + +``` shell +$ kubectl -n default apply -f ~/sample.yaml +collaset.apps.kusionstack.io/collaset-sample-a created +collaset.apps.kusionstack.io/collaset-sample-b created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-a-g4sjj 1/1 Running 0 42s +collaset-sample-a-ph9vc 1/1 Running 0 42s +collaset-sample-b-fqkq4 1/1 Running 0 42s +collaset-sample-b-lqg8f 1/1 Running 0 42s + +$ kubectl -n default get pod -o yaml | grep collaset.kusionstack.io/instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + collaset.kusionstack.io/instance-id: "3" + collaset.kusionstack.io/instance-id: "2" +``` + +Now, the 4 Pods created by these 2 CollaSets will have a unique instance ID. + +### Revision Consistency +Pods within a CollaSet can utilize more than two different Pod templates simultaneously, including both the current and updated revisions. This can result from partial updates. To ensure the stability of Pod revisions over time, CollaSet records this information. When a Pod is deleted, CollaSet recreates it using its previous revision. + +It can be reproduced by following steps: + +1. Provision a new CollaSet with replicas 3. + +``` shell +$ kubectl -n default apply -f ./config/samples/apps_v1alpha1_collaset.yaml +collaset.apps.kusionstack.io/collaset-sample created + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 4s +collaset-sample-glgnb 1/1 Running 0 4s +collaset-sample-qs46r 1/1 Running 0 4s + +$ kubectl -n default get cls +NAME DESIRED CURRENT UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +collaset-sample 3 3 3 3 3 collaset-sample-6d7b7c58f collaset-sample-6d7b7c58f 64s +``` + +2. Update the image of PodTemplate of the CollaSet to image nginx:1.24.0 and set the partition to 2. Then there will be 2 Pods with image nginx:1.24.0 and 1 Pod with image nginx:1.25.2. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... 
+ updateStrategy: + rollingUpdate: + byPartition: + partition: 2 # update 2 Pods + +# Wait until these 2 Pods are updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 + - image: nginx:1.24.0 +``` + +3. Update the image of PodTemplate of the CollaSet to image nginx:1.23.4 and set the partition to 1. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.23.4 # changed from nginx:1.24.0 + ... + updateStrategy: + rollingUpdate: + byPartition: + partition: 1 # update 1 Pod + +# Wait until the Pod is updated, and check the Pod's images. +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # Pod collaset-sample-qs46r + - image: nginx:1.23.4 +``` + +Now, there are 3 Pods, each of which has an individual image. If we then delete the Pod with the image nginx:1.24.0, the new Pod replacing it will be created with the same image nginx:1.24.0 in order for the Pod to inherit the revision. + +``` shell +$ kubectl delete -n default delete pod collaset-sample-qs46r +pod "collaset-sample-qs46r" deleted + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5tgcs 1/1 Running 0 3h +collaset-sample-ht9x6 1/1 Running 0 2m27s # Pod recreated +collaset-sample-qs46r 1/1 Running 1 (3h ago) 3h + +$ kubectl get pod -o yaml | grep "image: nginx" + - image: nginx:1.25.2 + - image: nginx:1.24.0 # image has not been changed + - image: nginx:1.23.4 +``` + +### In-Place Update Pod +In addition to the `Recreate` update policy, which is identical to Deployment and StatefulSet, CollaSet offers the `InPlaceIfPossible` update policy. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # Options: InPlaceIfPossible, Recreate, Replace +``` + +`InPlaceIfPossible` is the default value, which instructs CollaSets to try to update Pods in place when only container images, labels, and annotations have changed. If some other fields have changed too, the policy will back off to the `Recreate` policy. + +`Recreate` indicates CollaSets always delete the old Pod and create a new one with an updated revision. + +If update pod template with `InPlaceIfPossible` policy as following example, the Pod will not be recreated. + +``` shell +$ kubectl -n default edit cls collaset-sample +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + template: + ...... + spec: + containers: + - image: nginx:1.24.0 # changed from nginx:1.25.2 + ... + updateStrategy: + podUpgradePolicy: InPlaceIfPossible # use InPlaceIfPossible policy + +$ kubectl -n default get pod -o yaml | grep "image: nginx" + - image: nginx:1.24.0 + - image: nginx:1.24.0 + - image: nginx:1.24.0 + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-5wvlh 1/1 Running 1 (6s ago) 2m10s +collaset-sample-ldvrg 1/1 Running 1 (6s ago) 2m10s +collaset-sample-pbz75 1/1 Running 1 (6s ago) 2m10s +``` + +### Replace Update Pod + +CollaSet provides the `Replace` policy for certain applications that are sensitive to the available number of Pods. + +``` yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: collaset-sample +spec: + ... 
+ updateStrategy: + podUpgradePolicy: Replace # Options: InPlaceIfPossible, Recreate, Replace +``` + +The `Replace` policy indicates that CollaSet should update a Pod by creating a new one to replace it. +Unlike the `Recreate` policy, which deletes the old Pod before creating a new updated one, or the `InPlaceIfPossible` policy, which updates the current Pod in place, +the `Replace` policy first creates a new Pod with the updated revision. It then deletes the old Pod once the new one becomes available for service. + +```shell +# Before updating CollaSet +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-dwkls 1/1 Running 0 6m55s + +# After updating CollaSet, the updated Pod is created first +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-dwkls 1/1 Running 0 6m55s +collaset-sample-rcmbv 0/1 ContainerCreating 0 0s + +# Once the created Pod is available for service, the old Pod will be deleted +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-rcmbv 1/1 Running 0 1s +collaset-sample-dwkls 1/1 Terminating 0 7m12s +``` + +The two Pods will have a pair of labels to identify their relationship. The new Pod will have the label `collaset.kusionstack.io/replace-pair-origin-name` to indicate the name of the old Pod, and the old Pod will have the label `collaset.kusionstack.io/replace-pair-new-id` to indicate the instance ID of the new Pod. + +Additionally, the new Pod and old Pod will each begin their own PodOpsLifecycles, which are independent of each other. + +### Recreate And Replace Specified Pod + +In practice, users often need to recreate or replace specified Pods under a CollaSet. + +To delete a Pod, users can simply call the Kubernetes API, like executing `kubectl delete pod `. +However, this will bypass the [PodOpsLifecycle](https://www.kusionstack.io/docs/kuperator/concepts/podopslifecycle) Mechanism. +We provide following two options: + +1. Enable the feature `GraceDeleteWebhook` so that it is possible to delete Pods through `PodOpsLifecycle`. +```shell +# Enable the GraceDeleteWebhook feature when starting the controller with this argument +$ /manager --feature-gates=GraceDeleteWebhook=true +``` +```shell +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +collaset-sample-vqccr 1/1 Running 0 21s + +# Delete the pod directly. A message will respond indicating that the Pod deletion is handled by PodOpsLifecycle +kubectl -n default delete pod collaset-sample-vqccr +Error from server (failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle): admission webhook "validating-pod.apps.kusionstack.io" denied the request: failed to validate GraceDeleteWebhook, pod deletion process is underway and being managed by PodOpsLifecycle + +# The old Pod is deleted, and a new Pod will be created +$ kubectl -n default get pod -w +collaset-sample-vqccr 1/1 Running 0 71s +collaset-sample-vqccr 1/1 Terminating 0 71s +...... +collaset-sample-nbl6t 0/1 Pending 0 0s +collaset-sample-nbl6t 0/1 ContainerCreating 0 0s +...... +collaset-sample-nbl6t 1/1 Running 0 0s +``` +2. Label the Pod with `podopslifecycle.kusionstack.io/to-delete`, so that CollaSet will delete the Pod through PodOpsLifecycle. 
+
+```shell
+# Label Pod
+$ kubectl -n default label pod collaset-sample-nbl6t podopslifecycle.kusionstack.io/to-delete=true
+
+# The old Pod is deleted, and a new Pod will be recreated
+$ kubectl -n default get pod -w
+collaset-sample-nbl6t   1/1   Running             0   5m28s
+collaset-sample-nbl6t   1/1   Terminating         0   5m28s
+......
+collaset-sample-w6x69   0/1   Pending             0   0s
+......
+collaset-sample-w6x69   0/1   ContainerCreating   0   0s
+......
+collaset-sample-w6x69   1/1   Running             0   2s
+```
+
+Recreating a Pod will delete the old Pod first and then create a new one. This will affect the available Pod count.
+To avoid this, CollaSet provides a feature to replace Pods by labeling them with `podopslifecycle.kusionstack.io/to-replace`.
+
+```shell
+# Replace Pod by label
+$ kubectl -n default label pod collaset-sample-w6x69 podopslifecycle.kusionstack.io/to-replace=true
+
+# The old Pod is deleted, and a new Pod will be created
+$ kubectl -n default get pod -w
+collaset-sample-w6x69   1/1   Running             0   5m29s
+collaset-sample-74fsv   0/1   Pending             0   0s
+collaset-sample-74fsv   0/1   ContainerCreating   0   0s
+......
+collaset-sample-74fsv   1/1   Running             0   2s
+......
+collaset-sample-w6x69   0/1   Terminating         0   5m33s
+```
+
+
+### Supporting PVCs
+CollaSet introduces support for PVCs, allowing users to declare `VolumeClaimTemplates` to create PVCs for each Pod.
+Furthermore, in response to common issues with PVC management, such as high modification costs and limited control, CollaSet extends its functionality with the following advantages over StatefulSet:
+
+1. Supports updating, adding, and deleting `volumeClaimTemplates`.
+2. Provides control over the PVC lifecycle.
+
+#### Provision PVCs
+The `collaset-pvc.yaml` file declares a CollaSet with `VolumeClaimTemplates` to provision a PVC with `1Gi` storage for each Pod.
+These PVCs are then mounted on the container at the path `/path/mount/www`.
+
+``` yaml
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: CollaSet
+metadata:
+  name: foo
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: foo
+  template:
+    metadata:
+      labels:
+        app: foo
+    spec:
+      containers:
+      - image: nginx:1.25
+        name: nginx
+        volumeMounts:
+        - mountPath: /path/mount/www # path to mount PVC
+          name: www
+  volumeClaimTemplates:
+  - metadata:
+      name: www
+    spec:
+      storageClassName: standard
+      volumeMode: Filesystem
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+          storage: 1Gi
+```
+
+Pods and PVCs can be provisioned by CollaSet.
+
+``` shell
+$ kubectl -n default apply -f collaset-pvc.yaml
+collaset.apps.kusionstack.io/foo created
+
+$ kubectl -n default get pod
+NAME        READY   STATUS    RESTARTS   AGE
+foo-pw5lg   1/1     Running   0          4s
+foo-5n6ts   1/1     Running   0          4s
+
+$ kubectl -n default get pvc
+NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+foo-www-h5zv7   Bound    pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6   1Gi        RWO            standard       7s
+foo-www-lswp2   Bound    pvc-9564b44b-9c99-467b-abee-4285183ff9c3   1Gi        RWO            standard       7s
+```
+
+Each Pod and its related PVC share the same value of the label `collaset.kusionstack.io/instance-id`.
+
+``` shell
+$ kubectl -n default get pod -o yaml | grep instance-id
+    collaset.kusionstack.io/instance-id: "1"
+    collaset.kusionstack.io/instance-id: "0"
+
+$ kubectl -n default get pvc -o yaml | grep instance-id
+    collaset.kusionstack.io/instance-id: "1"
+    collaset.kusionstack.io/instance-id: "0"
+```
+
+#### Update PVCs
+To reduce the cost of operating PVCs, e.g. expanding storage capacity, CollaSet supports updating, adding, and deleting `volumeClaimTemplates`.
+ +To achieve this, for each PVC, CollaSet calculates a hash value based on its template, and attatch it to label `collaset.kusionstack.io/pvc-template-hash`. +Once users modify the templates, CollaSet recognizes, caculates a new hash value and attach it on new PVCs to replace old ones. + +Let's give it a try, update the storage of PVC template from `1Gi` to `2Gi`. +``` shell +$ kubectl -n default edit cls foo + ...... + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeModes: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: +- storage: 1Gi ++ storage: 2Gi # update pvc template to expand storage +...... +``` + +There are 2 new PVCs with `2Gi` storage created with different hash values. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-pw5lg 1/1 Terminating 0 7s +foo-5n6ts 1/1 Terminating 0 7s +foo-9nhz4 0/1 Pending 0 1s +foo-xb2gd 0/1 Pending 0 1s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-h5zv7 Terminating pvc-8a7d8ea0-ced0-423a-9255-bedfad0f2db6 1Gi RWO standard 11s +foo-www-lswp2 Terminating pvc-9564b44b-9c99-467b-abee-4285183ff9c3 1Gi RWO standard 11s +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 5s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 5s + +$ kubectl -n default get pvc -o yaml | grep pvc-template-hash + collaset.kusionstack.io/pvc-template-hash: 594d8857f9 # hash value of old pvc + collaset.kusionstack.io/pvc-template-hash: 594d8857f9 + collaset.kusionstack.io/pvc-template-hash: d78c5ff6b # hash value of new pvc + collaset.kusionstack.io/pvc-template-hash: d78c5ff6b +``` + +For old Pvcs, users can retain them by configuring `whenScaled` policy to `Retain` . +Then old PVCs can be re-mount on its related Pod after rolling back. +Otherwise, old PVCs can be deleted by default policy `Delete`. + + +#### Add PVCs +Add a PVC template `yyy`, which is mounted on the container at the path `/path/mount/yyy`. + +``` shell +$ kubectl -n default edit cls foo +...... + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www ++ - mountPath: /path/mount/yyy # path to mount PVC ++ name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi ++ - metadata: # added pvc template ++ name: yyy ++ spec: ++ storageClassName: standard ++ volumeMode: Filesystem ++ accessModes: [ "ReadWriteOnce" ] ++ resources: ++ requests: ++ storage: 2Gi +``` + +Now, each pod has two PVCs, which include a new PVCs claimed by template `yyy` and one old PVC claimed by template `www`. 
+ +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-8wwsz 0/1 Pending 0 1s +foo-9nhz4 1/1 Terminating 0 23s +foo-hd2cv 0/1 Pending 0 1s +foo-xb2gd 1/1 Terminating 0 23s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 25s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 25s +foo-yyy-c68nh Bound pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 3s # new pvc +foo-yyy-vpwss Bound pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 3s # new pvc +``` + +#### Delete PVCs +Delete the PVC template `yyy` on CollaSet. + +``` shell +$ kubectl -n default edit cls foo +...... + spec: + containers: + - image: nginx:1.25 + name: nginx + volumeMounts: + - mountPath: /path/mount/www # path to mount PVC + name: www +- - mountPath: /path/mount/yyy # path to mount PVC +- name: yyy + volumeClaimTemplates: + - metadata: + name: www + spec: + storageClassName: standard + volumeMode: Filesystem + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi +- - metadata: # delete pvc template +- name: yyy +- spec: +- storageClassName: standard +- volumeMode: Filesystem +- accessModes: [ "ReadWriteOnce" ] +- resources: +- requests: +- storage: 2Gi +``` + +Now, PVCs claimed by template `yyy` are deleted and the origin PVCs claimed by template `www` are retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-6qcpc 1/1 Running 0 2s +foo-z2jqv 1/1 Running 0 2s +foo-8wwsz 1/1 Terminating 0 38s +foo-hd2cv 1/1 Terminating 0 38s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-cj2s9 Bound pvc-647e2a81-7fc6-4f37-a835-e63da9172de3 2Gi RWO standard 61s +foo-www-hp2t6 Bound pvc-03d7536e-cd3f-465f-bd30-362a9510f0c9 2Gi RWO standard 61s +foo-yyy-c68nh Terminating pvc-94ee5eff-2350-4cb7-8411-85f0928d25fc 2Gi RWO standard 39s +foo-yyy-vpwss Terminating pvc-8363dc78-3340-47d0-aa11-0adac36308d5 2Gi RWO standard 39s +``` + +#### PVC Retention Policy +CollaSet provides control over PVC lifecycle by configuring `spec.persistentVolumeClaimRetentionPolicy`. +Users can retain or delete PVCs after its related Pod is scaled down or CollaSet is deleted, respectively. +This feature is also supported by [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) since v1.27. +Basic rule is detailed as follows: +- `whenScale` : decides to delete or retain PVCs after Pod is scaled down. +- `whenDeleted`: decides to delete or retain PVCs after CollaSet is deleted. + +For each policy users can set the value to either Delete (by default) or Retain. +Note that for StatefulSet, the default policy is Retain. + +#### whenScaled +Apply `collaset-pvc.yaml` and edit foo to scale replicas to 1. +``` shell +$ kubectl apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl edit cls foo + ...... + spec: +- replicas: 2 ++ replicas: 1 # scale in 1 pod + selector: + matchLabels: + app: foo + ...... +``` +As the `whenScaled` is not configured, thus its value is `Delete` by default. +Consequently, PVC `foo-www-wzwbq` is deleted as its related Pod `foo-tkc5m` is scaling down. 
+ +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-tkc5m 0/1 Terminating 0 27s # related pvc is terminating +foo-vwtcm 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-wzwbq Terminating pvc-b92c28c6-59ad-4976-810c-8d538c4a22c6 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 29s +``` + +Set `Retain` to `whenScaled`, and scale replicas to 0. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 1 ++ replicas: 0 # scale in 1 pod + selector: + matchLabels: + app: foo ++ scaleStrategy: ++ persistentVolumeClaimRetentionPolicy: ++ whenScaled: Retain # retain the pvc after pod is scaled down + ...... +``` + +Pod `foo-vwtcm` is terminating, while its related PVC `foo-www-r4vlh` is retained. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-vwtcm -n default 1/1 Terminating 0 62s # related pvc is retained + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 63s +``` + +To validate the retention policy, try ro scale replicas to 2, and the remaining PVC should be mounted again. + +``` shell +$ kubectl -n default edit cls foo + ...... + spec: +- replicas: 0 ++ replicas: 2 # scale out 2 pods + ...... +``` + +We can see that PVC `foo-www-r4vlh` is retained by Pod `foo-px487` as they have the same `instance-id`. + +``` shell +$ kubectl -n default edit cls foo +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-ld5xc 1/1 Running 0 27s +foo-px487 1/1 Running 0 27s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 2m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` + +#### whenDelete +Edit `foo` to configure `Retain` policy for `whenDelete`, and then delete this CollaSet. +``` shell +$ kubectl -n default edit cls foo + ...... + scaleStrategy: + persistentVolumeClaimRetentionPolicy: + whenScaled: Retain ++ whenDelete: Retain # retain the pvc after collaset is deleted + ...... +collaset.apps.kusionstack.io/foo edited + +$ kubectl -n default delete cls foo +collaset.apps.kusionstack.io "foo" deleted +``` + +Now, try to recreate `foo` with 2 replicas, and the result shows both PVCs are retained. 
+``` shell +$ kubectl -n default apply -f collaset-pvc.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl -n default get pod +NAME READY STATUS RESTARTS AGE +foo-qhh8t 1/1 Running 0 2s +foo-ss255 1/1 Running 0 2s + +$ kubectl -n default get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +foo-www-d48gx Bound pvc-1884ee45-5cc9-48ee-b01a-20f5ad63d6d4 1Gi RWO standard 4m29s +foo-www-r4vlh Bound pvc-dd7f7cce-a3cb-4bba-a106-e5ad264959a2 1Gi RWO standard 6m47s + +$ kubectl -n default get pod foo-px487 -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" + collaset.kusionstack.io/instance-id: "1" + +$ kubectl -n default get pvc foo-www-r4vlh -o yaml | grep instance-id + collaset.kusionstack.io/instance-id: "0" # pvc foo-www-d48gx is retained + collaset.kusionstack.io/instance-id: "1" # pvc foo-www-r4vlh is retained +``` diff --git a/kuperator_versioned_docs/version-v0.6/manuals/operationjob.md b/kuperator_versioned_docs/version-v0.6/manuals/operationjob.md new file mode 100644 index 00000000..70dd497f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/manuals/operationjob.md @@ -0,0 +1,190 @@ +--- +sidebar_position: 3 +--- + +# OperationJob +The OperationJob Workload is responsible for performing one-shot operational tasks on a batch of Pods, providing scaffolding for Pod operation scenarios to reduce user development costs. + +OperationJob offers an abstract interface layer for Pod operational capabilities, supporting developers to implement operational functions as plugins. +Each operational plugin will be presented as a type of `Action` API in OperationJob, such as `Replace`. +Additionally, it optionally facilitates seamless integration with the `PodOpsLifecycle` to ensure lossless traffic changes during operations. + +# Example +Following docs will guide you to play with OperationJob, and to implement OperationJob action plugin. + +## Replace +### Prepare Pods +Given that a [CollaSet](collaset.md) with more than 2 replicas is presented in your kubernetes cluster. +```shell +$ kubectl get cls +NAME DESIRED CURRENT AVAILABLE UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +foo 2 2 2 2 2 2 foo-7bdb974bc7 foo-7bdb974bc7 7s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-752sz 1/1 Running 0 41s +foo-jttd5 1/1 Running 0 41s +``` + +### Create OperationJob +The following `operationjob.yaml` file describes a `Replace` OperationJob, which will replace pods in `targets`. +For each replace operation, a new pod will be created to replace the target pod, which will not be deleted until new pod is **[ServiceAvailable](../concepts/podopslifecycle.md#introduction)**. + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: OperationJob +metadata: + name: opj-replace + namespace: default +spec: + action: Replace # Operation type is replace + activeDeadlineSeconds: 3600 # job will be forced failed after 3600s since startTime + TTLSecondsAfterFinished: 18000 # job will be deleted after 18000s since job failed or succeeded + partition: 1 # replace 1 pod at this time + targets: + - name: foo-jttd5 + - name: foo-752sz +``` + +Create OperationJob `opj-replace` to replace target pods. +```shell +$ kubectl apply -f operationjob.yaml +operationjobs.apps.kusionstack.io/opj-replace created +``` + +### Replace Pods +The status of OperationJob is updated, and target pod `foo-jttd5` is replaced by `foo-mpl7n`. 
+```shell +$ kubectl get opj +NAME PROGRESS AGE +opj-replace Processing 11s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-752sz 1/1 Running 0 92s +foo-jttd5 1/1 Running 0 92s +foo-mpl7n 0/1 ContainerCreating 0 4s + +$ kubectl get opj opj-replace -o yaml | grep -A20 status +status: + observedGeneration: 1 + progress: Processing # job is processing + succeededPodCount: 1 + targetDetails: + - extraInfo: + NewPod: foo-mpl7n + name: foo-jttd5 + progress: Succeeded # foo-jttd5 is replaced by foo-mpl7n suceeded + - name: foo-752sz + progress: Pending # replace is pending + totalPodCount: 2 +``` + +The ```status.progress``` can be: +- ```Pending```: operationJob is waiting to be processed +- ```Processing```: operationJob is being processed +- ```Failed```: some target pods have failed to operate +- ```Succeeded```: all target pods have succeeded to operate + +Note that if a target pod has failed to operate, `status.targetDetails[x].error` will show the `reason` and `message` for failure. +And if it has succeeded to operate, the error status will be cleared. + +The ```status.targetDetails[x].progress``` can be: +- ```Pending```: target pod is waiting to be operated +- ```Processing```: target pod is being operated +- ```Failed```: target pod has failed to operate +- ```Succeeded```: target pod has succeeded to operate + +Edit `opj-replace` to replace the other target pod. +```shell +$ kubectl edit opj opj-replace +``` + +```yaml +# operationjob.yaml +# Edit partition to 2 to replace all pods +... +spec: + ... + partition: 2 +``` + +All pods replaced. +```shell +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-752sz 1/1 Running 0 8m5s +foo-mpl7n 1/1 Running 0 6m37s +foo-rgxbl 0/1 ContainerCreating 0 5s + +$ kubectl get opj opj-replace -o yaml | grep -A20 status +status: + endTimestamp: "2024-09-13T08:47:43Z" + observedGeneration: 2 + progress: Succeeded # all pods are replaced, job is suceeded + succeededPodCount: 2 + targetDetails: + - extraInfo: + NewPod: foo-mpl7n + name: foo-jttd5 + progress: Succeeded + - extraInfo: + NewPod: foo-rgxbl + name: foo-752sz + progress: Succeeded # foo-752sz is replaced by foo-rgxbl suceeded + totalPodCount: 2 + +$ kubectl get opj +NAME PROGRESS AGE +opj-replace Succeeded 6m42s +``` + +The ```status.targetDetails[x].extraInfo``` is a key-value string map, which is used to store operate information for target. +Developers can define and utilize specified extraInfos for their action plugins. + +## Tutorial +### Action Plugin +Developers implement and register action plugin, then OperationJob controller is responsible for running it: + +![operationjob-framework](/img/kuperator/manuals/operationjob/operationjob-frame.png) + +Action plugin is formulated as golang adapter `ActionHandler`, which consists 4 idempotent functions: +- `Setup` sets up action in `AddToMgr`, i.e., watch resources for action, register cache index +- `OperateTarget` operates what you want to the target pod +- `GetOpsProgress` gets current operation status of target pod, i.e., "Processing", "Failed" and "Succeeded" +- `ReleaseTarget` cleans up target pod and operating environment when operation finished or job deleted + +```go +... +type ActionHandler interface { + // Setup sets up action with manager in AddToMgr, i.e., watch, cache... 
+ Setup(controller.Controller, *mixin.ReconcilerMixin) error + + // OperateTarget do real operation to target + OperateTarget(context.Context, *OpsCandidate, *appsv1alpha1.OperationJob) error + + // GetOpsProgress returns target's current opsStatus, e.g., progress, reason, message + GetOpsProgress(context.Context, *OpsCandidate, *appsv1alpha1.OperationJob) (progress ActionProgress, err error) + + // ReleaseTarget releases the target from operation when the operationJob is deleted + ReleaseTarget(context.Context, *OpsCandidate, *appsv1alpha1.OperationJob) error +} +``` + +### Register Action +Developers can register implemented action plugins by calling `RegisterAction` before OperationJob controller `AddToMgr` is called. +The register function consists 3 parameters: +- `action`: string, name of action plugin, showed in `spec.action` +- `hander`: ActionHandler, the implemented adapter +- `enablePodOpsLifecycle`: bool, if true, target pods will be operated in the manner of **[PodOpsLifecycle](../concepts/podopslifecycle.md)** + +```go +... +// RegisterAction will register an operationJob action with handler and lifecycleAdapter +// Note: if enablePodOpsLifecycle=false, this operation will be done directly, ignoring podOpsLifecycle +func RegisterAction(action string, handler ActionHandler, enablePodOpsLifecycle bool) {...} +``` + +### Example +As an example, OperationJob natively supports Replace action. +The **[Replace ActionHandler](https://github.com/KusionStack/kuperator/blob/e43c4c0dc3bda50789988b10695e7a314cb44784/pkg/controllers/operationjob/replace/replace.go#L49)** is implemented and registered before OperationJob controller added in **[main function](https://github.com/KusionStack/kuperator/blob/e43c4c0dc3bda50789988b10695e7a314cb44784/main.go#L113)**. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.6/manuals/poddecoration.md b/kuperator_versioned_docs/version-v0.6/manuals/poddecoration.md new file mode 100644 index 00000000..bae6186a --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/manuals/poddecoration.md @@ -0,0 +1,352 @@ +--- +sidebar_position: 2 +--- + +# PodDecoration +PodDecoration works in conjunction with CollaSet to selectively inject specific configurations to Pods that meet certain criteria. + +PodDecoration not only allows injecting sidecar containers to Pods but also enables modifying existing container configurations, metadata, and scheduling parameters etc. +The PodDecoration controller does not control the upgrade of Pods. The actual upgrade process is fully controlled by the CollaSet controller. This means that the injection upgrade of PodDecoration can also be performed `InPlaceIfPossible`. + +About [CollaSet](collaset.md). +# Example + +## Create CollaSet + +```yaml +# collaset.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: foo + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + spec: + containers: + - image: nginx:1.25.2 + name: foo +``` +Use `collaset.yaml` to create three pods under CollaSet `foo` management. 
+```shell +$ kubectl apply -f collaset.yaml +collaset.apps.kusionstack.io/foo created + +$ kubectl get cls +NAME DESIRED CURRENT AVAILABLE UPDATED UPDATED_READY UPDATED_AVAILABLE CURRENT_REVISION UPDATED_REVISION AGE +foo 3 3 3 3 3 3 foo-7bdb974bc7 foo-7bdb974bc7 7s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2wnnf 1/1 Running 0 41s +foo-hqpx7 1/1 Running 0 41s +foo-mqt48 1/1 Running 0 41s +``` +## Create PodDecoration + +The following `poddecoration.yaml` file describes a PodDecoration, which selects the pod under CollaSet `foo` and injects the content in `template` into the pod with `instance-id=0`. + +```yaml +# poddecoration.yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +metadata: + name: sample-pd +spec: + selector: # selected pod range in which PodDecoration takes effect + matchLabels: + app: foo + updateStrategy: + rollingUpdate: + selector: # select pod to upgrade in effect range + matchLabels: + collaset.kusionstack.io/instance-id: "0" + template: + metadata: + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v1" + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + command: ["sleep", "2h"] + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` + +Create PodDecoration `sample-pd` to upgrade selected pod +```shell +$ kubectl apply -f poddecoration.yaml +poddecoration.apps.kusionstack.io/sample-pd created +``` +The status of PodDecoration is updated, and one pod is injected with sidecar through recreate. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 1 1 1 sample-pd-9465f4c84 20s + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 15s +foo-2wnnf 1/1 Running 0 2m +foo-hqpx7 1/1 Running 0 2m + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-2wnnf + escaped: true + - name: foo-hqpx7 + escaped: true + matchedPods: 3 + injectedPods: 1 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + updatedRevision: sample-pd-9465f4c84 +``` + +## Update PodDecoration + +### Rolling update v1 + +Edit `sample-pd` to expand the upgrade scope. +```shell +$ kubectl edit pd sample-pd +``` + +```yaml +# poddecoration.yaml +# Edit updateStrategy to select instance-id in [0, 1, 2] +... +spec: + ... + updateStrategy: + rollingUpdate: + selector: + matchExpressions: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + - "1" + - "2" + template: + ... +``` + +All pods updated. 
+```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 3 3 sample-pd-9465f4c84 sample-pd-9465f4c84 3m + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 0 3m +foo-lftw8 2/2 Running 0 8s +foo-n57rr 2/2 Running 0 8s + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + currentRevision: sample-pd-9465f4c84 + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-9465f4c84 + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 3 + updatedReadyPods: 3 + updatedAvailablePods: 3 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-9465f4c84 +``` +### Rolling update v1 -> v2 + + +Update `sample-pd`'s sidecar container image and `updateStrategy`. +```shell +$ kubectl edit pd sample-pd +``` +```yaml +# poddecoration.yaml +# Update sidecar-a's image with ubuntu:22.10 +# Edit updateStrategy to select instance-id in [0] +... +spec: + ... + updateStrategy: + rollingUpdate: + selector: + - key: collaset.kusionstack.io/instance-id + operator: In + values: + - "0" + template: + ... + containers: + - injectPolicy: AfterPrimaryContainer + name: sidecar-a + image: ubuntu:22.10 + ... +``` +Pod `foo-2gnnl` in-place upgrade sidecar container image. +```shell +$ kubectl get pd +NAME EFFECTIVE MATCHED INJECTED UPDATED UPDATED_READY CURRENT_REVISION UPDATED_REVISION AGE +sample-pd true 3 3 1 1 sample-pd-9465f4c84 sample-pd-8697d4bf8c 6min + +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +foo-2gnnl 2/2 Running 1 (12s ago) 6m +foo-lftw8 2/2 Running 0 3min +foo-n57rr 2/2 Running 0 3min + +$ kubectl get pod foo-2gnnl -o yaml | grep "image: ubuntu" + image: ubuntu:22.10 + +$ kubectl get pd sample-pd -o yaml | grep -A20 status +status: + details: + - affectedReplicas: 3 + collaSet: foo + pods: + - name: foo-2gnnl + revision: sample-pd-8697d4bf8c + - name: foo-lftw8 + revision: sample-pd-9465f4c84 + - name: foo-n57rr + revision: sample-pd-9465f4c84 + matchedPods: 3 + injectedPods: 3 + updatedPods: 1 + updatedReadyPods: 1 + updatedAvailablePods: 1 + isEffective: true + currentRevision: sample-pd-9465f4c84 + updatedRevision: sample-pd-8697d4bf8c +``` + + +# Features + +## Injection + +### Metadata +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +spec: + template: + metadata: + - patchPolicy: MergePatchJson + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"sample-pd","version":"v2"}]' + - patchPolicy: Overwrite + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-name: sample-pd +``` +`patchPolicy` is the injected policy, as follows: +- `Retain`: The original value of annotations and labels will be retained. +- `Overwrite`: The value of annotations and labels corresponding to the existing key will be overwritten. +- `MergePatchJson`: It only takes effect for annotation. If the key does not exist, the value will be written directly. Otherwise, the json value will be merged. 
+ +For example: +```yaml +# Old pod metadata +metadata: + labels: + custom.io/sidecar-version: "v1" + annotations: + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}]' + +# After metadata injected +metadata: + labels: + custom.io/sidecar-version: "v2" + annotations: + cafe.sofastack.io/decoration-type: sample-pd + cafe.sofastack.io/decoration-version: '[{"name":"old-pd","version":"v1"}, {"name":"sample-pd","version":"v2"}]' +``` +### Primary Container + +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodDecoration +spec: + template: + primaryContainers: + - targetPolicy: ByName + name: foo + image: foo:v2 + env: + - name: APP_NAME + value: foo + volumeMounts: + - name: sample-volume + mountPath: /vol/sample + volumes: + - name: sample-volume + emptyDir: {} +``` +Injection into the primary containers only supports limited fields: `image`, `env` and `volumeMounts`. + +`targetPolicy` indicates which existed container these configuration should inject into, as follows: +- `ByName`: Only inject containers matching `name`. +- `All`: Inject all primary containers. +- `First`: Inject into first primary container. +- `Last`: Inject into last primary container. + +### Sidecar Container + +```yaml +spec: + template: + containers: + - injectPolicy: AfterPrimaryContainer # Container injected policy, AfterPrimaryContainer or BeforePrimaryContainer + name: sidecar-a + image: ubuntu:22.04 + ... +``` +Inject a new sidecar container. Optional, it can be placed in front or behind the primary container. +### InitContainer + +```yaml +spec: + template: + initContainers: + - name: init + image: custom-init-image:v1 + ... +``` + +## Upgrade strategy +Coming soon... \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.6/manuals/podtransitionrule.md b/kuperator_versioned_docs/version-v0.6/manuals/podtransitionrule.md new file mode 100644 index 00000000..e7067fef --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/manuals/podtransitionrule.md @@ -0,0 +1,220 @@ +--- +sidebar_position: 4 +--- + +# PodTransitionRule +In normal pod lifecycle, some phases are defined. For example, K8s Pods follow a defined lifecycle,starting in the `Pending` phase, moving through `Running` if at least one of its primary containers starts `OK`, and then through either the `Succeeded` or `Failed` phases depending on whether any container in the Pod terminated in failure. + +These phase definitions can fulfill basic Pod change scenarios, but it are ambiguous. +Actually, before pod upgrade or ready, it is necessary to have some check mechanisms in place to ensure the safety of pod changes. Fortunately, [PodOpsLifecycle](../concepts/podopslifecycle.md) extends and supports some check stages: `PreCheck` before pod upgrade and `PostCheck` before pod ready. + +To ensure a more fine-grained and controlled change process for Pods, we introduce custom rules or perform additional tasks as prerequisites for state transitions before the desired state of a Pod is achieved. Similar to the Pod `readinessGates`, where certain conditions must be met for a Pod to be considered readiness. For example, we consider a Pod ready for the `PostCheck` phase only if it has specific labels. For this purpose, we introduce the `PodTransitionRule` as a prerequisite for the state transition of a Pod. + +## Rule Definition + +You can use `PodTransitionRule` to define a set of transition rules for your workload pods. 
+Each rule will be executed at the corresponding stage, and it will be blocked if the conditions are not met. + +Here is an example: +```yaml +apiVersion: apps.kusionstack.io/v1alpha1 +kind: PodTransitionRule +metadata: + name: podtransitionrule-sample +spec: + rules: + - availablePolicy: + maxUnavailableValue: 50% + name: maxUnavailable + - stage: PreCheck # stages are supported by PodOpsLifecycle. Defaults to PreCheck. + labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + name: labelCheck + - stage: PostCheck + webhook: + clientConfig: + url: https://1.1.1.1:8089/post-stop + caBundle: Cg== + poll: + url: http://1.1.1.1:8089/fetch-result + rawQueryKey: task-id # URL parameter key to carry trace ID when fetching result. Defaults to task-id in form 'QueryUrl=URL?rawQueryKey=' + intervalSeconds: 5 + timeoutSeconds: 60 + failurePolicy: Fail + parameters: + - key: podIP + valueFrom: + fieldRef: + fieldPath: status.podIP + name: webhookCheck + selector: # select pods in effect + matchLabels: + app: foo +``` + + +### Available Policy +An `availablePolicy` rule defines the availability strategy during the Pod update process. + +#### maxUnavailable +```yaml +availablePolicy: + maxUnavailable: + value: 50% # int or string +``` + +`maxUnavailableValue` is the maximum number of pods that can be unavailable during the update. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). +Absolute number is calculated from percentage by rounding down. +This can not be 0. + +#### minAvailable +```yaml +availablePolicy: + minAvailable: + value: 5 # int or string +``` +`minAvailableValue` is the minimum number of pods that should be available during the update. + +### Label Check + +A `labelCheck` rule is used to check if labels are satisfied. +You can define your own labels as change check conditions and modify the labels according to your needs. +```yaml +labelCheck: + requires: + matchLabels: + app.custom/ready: 'true' + matchExpressions: + - key: app.custom/forbidden + operator: DoesNotExist +``` + +### Webhook +A `webhook` is an HTTP callback, based on which a external web application can determine whether a pod can pass this check. + +* An HTTP POST occurs first when pods entries the configured stage which defaults PreCheck. +* If `poll` is provided, this rule then keeps calling polling url to fetch a long running job result. This job can be located by `task-id` returned from the response of the first request. 
+
+
+```yaml
+webhook:
+  clientConfig:  # custom server config
+    url: https://1.1.1.1:8089/post-stop
+    caBundle: Cg==
+    poll:
+      url: http://1.1.1.1:8089/fetch-result
+      rawQueryKey: task-id
+      intervalSeconds: 5
+      timeoutSeconds: 60
+  failurePolicy: Fail
+  parameters:
+    - key: podIP
+      valueFrom:
+        fieldRef:
+          fieldPath: status.podIP
+```
+**Protocol without poll**
+
+Request:
+```json
+// URL: https://1.1.1.1:8089/post-stop
+// Method: POST
+
+{
+  "traceId": "",  // generated by Kuperator, can be used to track the request
+  "stage": "PreTrafficOff",
+  "ruleName": "webhookCheck",
+  "resources": [  // Information of Pods which are in this stage
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-a",
+      "parameters": {
+        "podIP": "1.0.0.1"  // Customized information users can indicate via rule parameters
+      }
+    },
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-b",
+      "parameters": {
+        "podIP": "1.0.0.2"
+      }
+    }
+  ]
+}
+```
+Response:
+```json
+{
+  "success": false,
+  "message": "msg",
+  "finishedNames": ["pod-a", "pod-b"]
+}
+```
+The `success` field in the response indicates whether all pods are approved or not. If it is `false`, the `finishedNames` field can be used to approve a subset of the pods.
+
+**Protocol with poll**
+
+Request:
+```json
+// URL: https://1.1.1.1:8089/post-stop
+// Method: POST
+
+{
+  "traceId": "",  // generated by Kuperator, can be used to track the request
+  "stage": "PreTrafficOff",
+  "ruleName": "webhookCheck",
+  "resources": [  // Information of Pods which are in this stage
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-a",
+      "parameters": {
+        "podIP": "1.0.0.1"  // Customized information users can indicate via rule parameters
+      }
+    },
+    {
+      "apiVersion": "v1",
+      "kind": "Pod",
+      "name": "pod-b",
+      "parameters": {
+        "podIP": "1.0.0.2"
+      }
+    }
+  ]
+}
+```
+
+Response:
+
+```json
+{
+  "success": true,
+  "poll": true,           // required to indicate that polling calls are necessary
+  "taskId": "<taskId>",   // required to fetch the polling result
+  "message": "msg"
+}
+```
+The `success` field in the response indicates whether the first request succeeded or not. If it is true and the field `poll` in the response is `true` (or the field `async` in the response is `true`), PodTransitionRule will then keep calling the poll URL to fetch the processing result.
+The field `taskId` is required for polling.
+
+The polling request uses the GET method and takes the form `QueryUrl=URL?task-id=<taskId>`. The parameter key in this URL defaults to `task-id` if `poll` is used in the above response, and would be `trace-id` if `async` is used instead.
+Users can also indicate the key with the field `poll.rawQueryKey`.
+
+The response from the polling call is expected to look like the following:
+
+```json
+{
+  "success": true,
+  "message": "msg",
+  "finished": false,
+  "finishedNames": ["pod-a", "pod-b"]
+}
+```
+
+`success` is supposed to be true if there is no error. If all pods are approved, `finished` should be `true`.
+If `finished` is `false`, `finishedNames` can be used to approve a subset of the pods.
diff --git a/kuperator_versioned_docs/version-v0.6/manuals/resourceconsist.md b/kuperator_versioned_docs/version-v0.6/manuals/resourceconsist.md
new file mode 100644
index 00000000..5517db78
--- /dev/null
+++ b/kuperator_versioned_docs/version-v0.6/manuals/resourceconsist.md
@@ -0,0 +1,437 @@
+---
+sidebar_position: 5
+---
+
+# ResourceConsist
+[**ResourceConsist**](https://github.com/KusionStack/resourceconsist/blob/main/README.md) aims to make it easy to implement a customized controller, and to offer controllers
+the ability to follow **PodOpsLifecycle**.
+
+## Tutorials
+**kusionstack.io/resourceconsist** mainly consists of frame, experimental/adapters and adapters.
+
+The frame, ```kusionstack.io/resourceconsist/pkg/frame```, is used by adapters to start a controller, and it handles
+Reconcile as well as the Employer/Employees' spec & status. If you write an adapter in your own repo, you can import
+```kusionstack.io/resourceconsist/pkg/frame/controller``` and ```kusionstack.io/resourceconsist/pkg/frame/webhook```,
+and call AddToMgr to start a controller.
+
+> The webhook adapter only needs to be implemented for controllers following PodOpsLifecycle.
+
+```go
+package main
+
+import (
+    controllerframe "kusionstack.io/resourceconsist/pkg/frame/controller"
+    webhookframe "kusionstack.io/resourceconsist/pkg/frame/webhook"
+)
+
+func main() {
+    controllerframe.AddToMgr(manager, yourOwnControllerAdapter)
+    webhookframe.AddToMgr(manager, yourOwnWebhookAdapter)
+}
+```
+### adapters
+The adapters package, ```kusionstack.io/resourceconsist/pkg/adapters```, consists of built-in adapters. You can start a
+controller with built-in adapters by simply calling AddBuiltinControllerAdaptersToMgr and AddBuiltinWebhookAdaptersToMgr,
+passing the built-in adapters' names. Currently, an alibabacloudslb adapter has been released. You can use it as follows:
+```go
+import (
+    "kusionstack.io/resourceconsist/pkg/adapters"
+)
+
+func main() {
+    adapters.AddBuiltinControllerAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+    adapters.AddBuiltinWebhookAdaptersToMgr(manager, []adapters.AdapterName{adapters.AdapterAlibabaCloudSlb})
+}
+```
+Built-in adapters can also be used the same way the frame is used. You can call NewAdapter from a certain built-in adapter pkg
+and then call frame.AddToMgr to start a controller/webhook.
+
+More built-in adapters will be implemented in the future. To keep this repo stable, all new built-in adapters will
+be added to ```kusionstack.io/resourceconsist/pkg/experimental/adapters``` first, and then moved to
+```kusionstack.io/resourceconsist/pkg/adapters``` once they are ready to be released.
+#### alibabacloudslb adapter
+```pkg/adapters/alibabacloudslb``` is an adapter that implements ReconcileAdapter. It follows **PodOpsLifecycle** to
+handle various scenarios during pod operations, such as creating a new pod, deleting an existing pod, or handling
+changes to pod configurations. This adapter ensures minimal traffic loss and provides a seamless experience for users
+accessing services load balanced by Alibaba Cloud SLB.
+
+In ```pkg/adapters/alibabacloudslb```, the real server is removed from SLB before pod operation in ACK. The LB
+management and real server management are handled by CCM in ACK. Since the alibabacloudslb adapter follows PodOpsLifecycle
+and real servers are managed by CCM, ReconcileLifecycleOptions should be implemented. If the cluster is not in ACK or
+CCM is not working in the cluster, the alibabacloudslb controller should implement additional methods of ReconcileAdapter.
+### experimental/adapters
+The experimental/adapters package is more like a pre-release pkg for built-in adapters. Usage of experimental/adapters is the same
+as for built-in adapters, but be aware: **DO NOT USE EXPERIMENTAL/ADAPTERS IN PRODUCTION**
+### demo adapter
+A demo is implemented in ```resource_controller_suite_test.go```.
+In the demo controller, the employer is represented
+as a service and is expected to have the following **DemoServiceStatus**:
+```
+DemoServiceStatus{
+   EmployerId: employer.GetName(),
+   EmployerStatuses: DemoServiceDetails{
+      RemoteVIP: "demo-remote-VIP",
+      RemoteVIPQPS: 100,
+   }
+}
+```
+The employee is represented as a pod and is expected to have the following **DemoPodStatus**:
+```
+DemoPodStatus{
+   EmployeeId: pod.Name,
+   EmployeeName: pod.Name,
+   EmployeeStatuses: PodEmployeeStatuses{
+      Ip: string,
+      Ipv6: string,
+      LifecycleReady: bool,
+      ExtraStatus: PodExtraStatus{
+         TrafficOn: bool,
+         TrafficWeight: int,
+      },
+   }
+}
+```
+The DemoResourceProviderClient is a fake client that handles backend provider resources related to the employer/employee
+(service/pods). In the demo controller, ```demoResourceVipStatusInProvider``` and ```demoResourceRsStatusInProvider```
+are mocked as the resources in the backend provider.
+
+The following walks through how the demo controller adapter is implemented.
+First, ```DemoControllerAdapter``` is defined, embedding a Kubernetes client and holding a resourceProviderClient. What goes into
+the adapter struct can be defined as needed.
+```go
+type DemoControllerAdapter struct {
+	client.Client
+	resourceProviderClient *DemoResourceProviderClient
+}
+```
+Next, declare that DemoControllerAdapter implements ```ReconcileAdapter``` and ```ReconcileLifecycleOptions```.
+Implementing ```ReconcileAdapter``` is mandatory, while ```ReconcileLifecycleOptions``` is optional; see the remarks
+on ```ReconcileLifecycleOptions``` in ```kusionstack.io/resourceconsist/pkg/frame/controller/types.go``` for the reason.
+```go
+var _ ReconcileAdapter = &DemoControllerAdapter{}
+var _ ReconcileLifecycleOptions = &DemoControllerAdapter{}
+```
+The following two methods implement ```ReconcileLifecycleOptions``` for DemoControllerAdapter; they define whether
+DemoControllerAdapter follows PodOpsLifecycle and whether it needs to record employees.
+```go
+func (r *DemoControllerAdapter) FollowPodOpsLifeCycle() bool {
+	return true
+}
+
+func (r *DemoControllerAdapter) NeedRecordEmployees() bool {
+	return needRecordEmployees
+}
+```
+```IEmployer``` and ```IEmployee``` are interfaces with several methods describing the status of the employer and the
+employees.
+```go
+type IEmployer interface {
+	GetEmployerId() string
+	GetEmployerStatuses() interface{}
+	EmployerEqual(employer IEmployer) (bool, error)
+}
+
+type IEmployee interface {
+	GetEmployeeId() string
+	GetEmployeeName() string
+	GetEmployeeStatuses() interface{}
+	EmployeeEqual(employee IEmployee) (bool, error)
+}
+
+type DemoServiceStatus struct {
+	EmployerId       string
+	EmployerStatuses DemoServiceDetails
+}
+
+type DemoServiceDetails struct {
+	RemoteVIP    string
+	RemoteVIPQPS int
+}
+
+type DemoPodStatus struct {
+	EmployeeId       string
+	EmployeeName     string
+	EmployeeStatuses PodEmployeeStatuses
+}
+```
+```GetSelectedEmployeeNames``` returns the names of all employees selected by the employer, here the names of the Pods
+selected by the Service. ```GetSelectedEmployeeNames``` is used to ensure the LifecycleFinalizer and ExpectedFinalizer, so you can
+return an empty result if your adapter doesn't follow PodOpsLifecycle, as sketched below.
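+
+A minimal, hypothetical sketch of that case (the ```NoLifecycleAdapter``` type below is not part of the demo; it only
+illustrates an adapter that does not follow PodOpsLifecycle):
+
+```go
+// NoLifecycleAdapter is a hypothetical adapter that does not follow PodOpsLifecycle.
+// It assumes "context" and sigs.k8s.io/controller-runtime/pkg/client are imported, as in the demo.
+type NoLifecycleAdapter struct{}
+
+// GetSelectedEmployeeNames can simply return an empty result, because the frame only uses it
+// to ensure the LifecycleFinalizer and ExpectedFinalizer on the selected Pods.
+func (a *NoLifecycleAdapter) GetSelectedEmployeeNames(ctx context.Context, employer client.Object) ([]string, error) {
+	return nil, nil
+}
+```
+
+The demo adapter does follow PodOpsLifecycle, so it lists the Pods selected by the Service: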
+```go
+func (r *DemoControllerAdapter) GetSelectedEmployeeNames(ctx context.Context, employer client.Object) ([]string, error) {
+	svc, ok := employer.(*corev1.Service)
+	if !ok {
+		return nil, fmt.Errorf("expect employer kind is Service")
+	}
+	selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated()
+	var podList corev1.PodList
+	err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector})
+	if err != nil {
+		return nil, err
+	}
+
+	selected := make([]string, len(podList.Items))
+	for idx, pod := range podList.Items {
+		selected[idx] = pod.Name
+	}
+
+	return selected, nil
+}
+```
+```GetExpectedEmployer``` and ```GetCurrentEmployer``` define what is expected according to the employer's spec and what the
+current status is, such as the load balancer from a cloud provider. Here in the demo adapter, the expected state is hard-coded
+and the current state is retrieved from a fake resource provider, ```demoResourceVipStatusInProvider```.
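+
+The frame then compares the two lists. Roughly speaking, it matches expected and current employer statuses by ID, checks
+them with ```EmployerEqual```, and turns the difference into the ```toCreates```/```toUpdates```/```toDeletes``` slices that
+are passed to ```CreateEmployer```/```UpdateEmployer```/```DeleteEmployer```. The sketch below only illustrates that idea; it
+is not the frame's actual code, and it assumes the ```IEmployer``` interface shown above.
+
+```go
+// classify is an illustrative sketch of how expected and current employer statuses
+// can be turned into create/update/delete work lists.
+func classify(expected, current []IEmployer) (toCreate, toUpdate, toDelete []IEmployer, err error) {
+	currentByID := make(map[string]IEmployer, len(current))
+	for _, c := range current {
+		currentByID[c.GetEmployerId()] = c
+	}
+	for _, e := range expected {
+		c, ok := currentByID[e.GetEmployerId()]
+		if !ok {
+			// expected but missing in the backend provider
+			toCreate = append(toCreate, e)
+			continue
+		}
+		equal, eqErr := e.EmployerEqual(c)
+		if eqErr != nil {
+			return nil, nil, nil, eqErr
+		}
+		if !equal {
+			// present in the backend provider but out of date
+			toUpdate = append(toUpdate, e)
+		}
+		delete(currentByID, e.GetEmployerId())
+	}
+	for _, c := range currentByID {
+		// present in the backend provider but no longer expected
+		toDelete = append(toDelete, c)
+	}
+	return toCreate, toUpdate, toDelete, nil
+}
+```
+
+The demo adapter's implementations of the two methods against the hard-coded spec and the fake provider are as follows: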
+```go +func (r *DemoControllerAdapter) CreateEmployer(ctx context.Context, employer client.Object, toCreates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + + toCreateDemoServiceStatus := make([]DemoServiceStatus, len(toCreates)) + for idx, create := range toCreates { + createDemoServiceStatus, ok := create.(DemoServiceStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreates employer is not DemoServiceStatus") + } + toCreateDemoServiceStatus[idx] = createDemoServiceStatus + } + + _, err := r.resourceProviderClient.CreateVip(&DemoResourceVipOps{ + VipStatuses: toCreateDemoServiceStatus, + }) + if err != nil { + return nil, toCreates, err + } + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployer(ctx context.Context, employer client.Object, toUpdates []IEmployer) ([]IEmployer, []IEmployer, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoServiceStatus := make([]DemoServiceStatus, len(toUpdates)) + for idx, update := range toUpdates { + updateDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdates employer is not DemoServiceStatus") + } + toUpdateDemoServiceStatus[idx] = updateDemoServiceStatus + } + + _, err := r.resourceProviderClient.UpdateVip(&DemoResourceVipOps{ + VipStatuses: toUpdateDemoServiceStatus, + }) + if err != nil { + return nil, toUpdates, err + } + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployer(ctx context.Context, employer client.Object, toDeletes []IEmployer) ([]IEmployer, []IEmployer, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoServiceStatus := make([]DemoServiceStatus, len(toDeletes)) + for idx, update := range toDeletes { + deleteDemoServiceStatus, ok := update.(DemoServiceStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDeletes employer is not DemoServiceStatus") + } + toDeleteDemoServiceStatus[idx] = deleteDemoServiceStatus + } + + _, err := r.resourceProviderClient.DeleteVip(&DemoResourceVipOps{ + VipStatuses: toDeleteDemoServiceStatus, + }) + if err != nil { + return nil, toDeletes, err + } + return toDeletes, nil, nil +} +``` +```GetExpectedEmployee```and```GetCurrentEmployee``` defines what is expected under the spec of employer and employees +and what is current status, like real servers under the load balancer from a cloud provider. Here in the demo adapter, +expected is calculated from pods and current is retrieved from a fake resource provider ```demoResourceRsStatusInProvider```. 
+```go +// GetExpectEmployeeStatus return expect employee status +func (r *DemoControllerAdapter) GetExpectedEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + if !employer.GetDeletionTimestamp().IsZero() { + return []IEmployee{}, nil + } + + svc, ok := employer.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("expect employer kind is Service") + } + selector := labels.Set(svc.Spec.Selector).AsSelectorPreValidated() + + var podList corev1.PodList + err := r.List(ctx, &podList, &client.ListOptions{Namespace: svc.Namespace, LabelSelector: selector}) + if err != nil { + return nil, err + } + + expected := make([]IEmployee, len(podList.Items)) + expectIdx := 0 + for _, pod := range podList.Items { + if !pod.DeletionTimestamp.IsZero() { + continue + } + status := DemoPodStatus{ + EmployeeId: pod.Name, + EmployeeName: pod.Name, + } + employeeStatuses, err := GetCommonPodEmployeeStatus(&pod) + if err != nil { + return nil, err + } + extraStatus := PodExtraStatus{} + if employeeStatuses.LifecycleReady { + extraStatus.TrafficOn = true + extraStatus.TrafficWeight = 100 + } else { + extraStatus.TrafficOn = false + extraStatus.TrafficWeight = 0 + } + employeeStatuses.ExtraStatus = extraStatus + status.EmployeeStatuses = employeeStatuses + expected[expectIdx] = status + expectIdx++ + } + + return expected[:expectIdx], nil +} + +func (r *DemoControllerAdapter) GetCurrentEmployee(ctx context.Context, employer client.Object) ([]IEmployee, error) { + var current []IEmployee + req := &DemoResourceRsOps{} + resp, err := r.resourceProviderClient.QueryRealServer(req) + if err != nil { + return current, err + } + if resp == nil { + return current, fmt.Errorf("demo resource rs query resp is nil") + } + + for _, rsStatus := range resp.RsStatuses { + current = append(current, rsStatus) + } + return current, nil +} +``` +```CreateEmployees/UpdateEmployees/DeleteEmployees``` handles creation/update/deletion of resources related to employee +on related backend provider. Here in the demo adapter, ```CreateEmployees/UpdateEmployees/DeleteEmployees``` +handles ```demoResourceRsStatusInProvider```. 
+```go +func (r *DemoControllerAdapter) CreateEmployees(ctx context.Context, employer client.Object, toCreates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toCreates == nil || len(toCreates) == 0 { + return toCreates, nil, nil + } + toCreateDemoPodStatuses := make([]DemoPodStatus, len(toCreates)) + + for idx, toCreate := range toCreates { + podStatus, ok := toCreate.(DemoPodStatus) + if !ok { + return nil, toCreates, fmt.Errorf("toCreate is not DemoPodStatus") + } + toCreateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.CreateRealServer(&DemoResourceRsOps{ + RsStatuses: toCreateDemoPodStatuses, + }) + if err != nil { + return nil, toCreates, err + } + + return toCreates, nil, nil +} + +func (r *DemoControllerAdapter) UpdateEmployees(ctx context.Context, employer client.Object, toUpdates []IEmployee) ([]IEmployee, []IEmployee, error) { + if toUpdates == nil || len(toUpdates) == 0 { + return toUpdates, nil, nil + } + + toUpdateDemoPodStatuses := make([]DemoPodStatus, len(toUpdates)) + + for idx, toUpdate := range toUpdates { + podStatus, ok := toUpdate.(DemoPodStatus) + if !ok { + return nil, toUpdates, fmt.Errorf("toUpdate is not DemoPodStatus") + } + toUpdateDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.UpdateRealServer(&DemoResourceRsOps{ + RsStatuses: toUpdateDemoPodStatuses, + }) + if err != nil { + return nil, toUpdates, err + } + + return toUpdates, nil, nil +} + +func (r *DemoControllerAdapter) DeleteEmployees(ctx context.Context, employer client.Object, toDeletes []IEmployee) ([]IEmployee, []IEmployee, error) { + if toDeletes == nil || len(toDeletes) == 0 { + return toDeletes, nil, nil + } + + toDeleteDemoPodStatuses := make([]DemoPodStatus, len(toDeletes)) + + for idx, toDelete := range toDeletes { + podStatus, ok := toDelete.(DemoPodStatus) + if !ok { + return nil, toDeletes, fmt.Errorf("toDelete is not DemoPodStatus") + } + toDeleteDemoPodStatuses[idx] = podStatus + } + + _, err := r.resourceProviderClient.DeleteRealServer(&DemoResourceRsOps{ + RsStatuses: toDeleteDemoPodStatuses, + }) + if err != nil { + return nil, toDeletes, err + } + + return toDeletes, nil, nil +} +``` diff --git a/kuperator_versioned_docs/version-v0.6/started/_category_.json b/kuperator_versioned_docs/version-v0.6/started/_category_.json new file mode 100644 index 00000000..877a378f --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/started/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Getting Started", + "position": 2 +} diff --git a/kuperator_versioned_docs/version-v0.6/started/demo-graceful-operation.md b/kuperator_versioned_docs/version-v0.6/started/demo-graceful-operation.md new file mode 100644 index 00000000..6eb1fce9 --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/started/demo-graceful-operation.md @@ -0,0 +1,340 @@ +# Using KusionStack Kuperator to operate Pods gracefully + +Applications always provide its service along with traffic routing. +On Kubernetes, they should be a set of Pods and a corresponding Kubernetes Service resource to expose the service. + +However, during operations such as updating Pod revisions, +there is a risk that client request traffic may be lost. This can lead to a poor user experience for developers. + +This tutorial will demonstrate how to operate Pods gracefully in a KusionStack Kuperator way on Aliyun ACK +with SLB as a Service backend provider. 
+ +> You can also get the same point from [this video](https://www.bilibili.com/video/BV1n8411q7sP/?t=15.7), +> which shows the same case using both KusionStack Kusion and Kuperator. +> The sample used in this video can be found from [KusionStack Catalog](https://github.com/KusionStack/catalog/tree/main/models/samples/wordpress). + +## Preparing + +First, ensure that you have an Aliyun ACK Kubernetes cluster set up in order to provision an Aliyun SLB. + +Next, install KusionStack Kuperator on this Kubernetes cluster +following [installation doc](https://kusionstack.io/docs/kuperator/started/install). + +## Get started + +### Create a new namespace + +To begin, create a new namespace for this tutorial: + +```shell +$ kubectl create ns kuperator-tutorial +``` + +### Provision Pods and Services + +You can create a set of Pods to run up a demo application service +by creating CollaSet resource using following command: + +``` shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.3 + name: server + command: + - /server + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +There should be 3 Pods created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +server-c5lsr 1/1 Running 0 2m23s +server-p6wrx 1/1 Running 0 2m23s +server-zn62c 1/1 Running 0 2m23s +``` + +Then create a Kubernetes Service by running following command, +which will provision Aliyun SLB to expose service. + +```shell +echo ' +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small + service.beta.kubernetes.io/backend-type: eni + labels: + kusionstack.io/control: "true" # this label is required + name: server +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: server + type: LoadBalancer +' | kubectl -n kuperator-tutorial apply -f - +``` + +A service with external IP should be provisioned. + +```shell +$ kubectl -n kuperator-tutorial get svc server +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +server LoadBalancer 192.168.225.55 47.101.49.182 80:30146/TCP 51s +``` + +The label `kusionstack.io/control: "true"` on Service is very important. +It means this service resource will be recognized by ResourceConsist framework, and then participate in PodOpsLifecycle +to control the Aliyun SLB to switch off traffic before updating each Pod and switch on traffic after it finished, +in order to protect the service. + +### Provision a client + +Then we will provision a client to access the service we created before. +Please replace `` in the following CollaSet yaml with the external IP from Kubernetes Service created above, and apply again. 
+ +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: client +spec: + replicas: 1 + selector: + matchLabels: + app: client + template: + metadata: + labels: + app: client + spec: + containers: + - image: wu8685/echo:1.3 + name: nginx + command: + - /client + args: + - -url + - http:///echo # EXTERNAL_IP should be replaced + - -m + - POST + - d + - kuperator-tutorial + - -qps + - "10" + - -worker + - "10" + - -timeout + - "10000" + resources: + limits: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 1Gi + memory: 100Mi +' | kubectl -n kuperator-tutorial apply -f - +``` + +A client Pod should be created. + +```shell +$ kubectl -n kuperator-tutorial get pod +NAME READY STATUS RESTARTS AGE +client-nc426 1/1 Running 0 30s +server-c5lsr 1/1 Running 0 19m +server-p6wrx 1/1 Running 0 19m +server-zn62c 1/1 Running 0 19m +``` + +This client will continuously access the service using the configuration provided in the command. +You can monitor the response codes from its logs: + +```shell +kubectl -n kuperator-tutorial logs -f client-nc426 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +worker-0 another loop, request: 50, failed: 0 +worker-1 another loop, request: 50, failed: 0 +``` + +The accesses are all successful. + +### Update Pod revision + +To trigger a Pod revision update, run the following command +to edit the container image and command in the PodTemplate of CollaSet: + +```shell +echo ' +apiVersion: apps.kusionstack.io/v1alpha1 +kind: CollaSet +metadata: + name: server +spec: + replicas: 3 + selector: + matchLabels: + app: server + template: + metadata: + labels: + app: server + spec: + containers: + - image: wu8685/echo:1.2 + name: server + command: + - /app/echo + resources: + limits: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + requests: + cpu: "0.1" + ephemeral-storage: 100Mi + memory: 100Mi + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 +' | kubectl -n kuperator-tutorial apply -f - +``` + +It will trigger all Pods updated simultaneously. So the application `server` has no Pod to serve. +We can observe the error from client logs. 
+```shell
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:54040->47.101.49.182:80: read: connection reset by peer
+worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": read tcp 10.244.1.11:34438->47.101.49.182:80: read: connection reset by peer
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-1 another loop, request: 20, failed: 3
+worker-0 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+worker-0 another loop, request: 20, failed: 3
+worker-1 fails to request POST http://47.101.49.182/echo : Post "http://47.101.49.182/echo": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+```
+
+### Provision a PodTransitionRule
+
+To avoid this problem, provision a PodTransitionRule with a maxUnavailable 50% rule by running the following command:
+
+```shell
+echo '
+apiVersion: apps.kusionstack.io/v1alpha1
+kind: PodTransitionRule
+metadata:
+  labels:
+  name: server
+spec:
+  rules:
+  - availablePolicy:
+      maxUnavailableValue: 50%
+    name: maxUnavailable
+  selector:
+    matchLabels:
+      app: server
+' | kubectl -n kuperator-tutorial apply -f -
+```
+
+After updating the CollaSet of the server to trigger an update, you will see the Pods rolling update one by one,
+ensuring that at least one Pod is always available to serve.
+
+```shell
+kubectl -n kuperator-tutorial get pod
+NAME           READY   STATUS    RESTARTS   AGE
+client-rrfbj   1/1     Running   0          25s
+server-457sn   0/1     Running   0          5s
+server-bd5sz   0/1     Running   0          5s
+server-l842s   1/1     Running   0          2m4s
+```
+
+You can see from the client logs that no access requests fail during this update.
+
+```shell
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-1 another loop, request: 50, failed: 0
+worker-0 another loop, request: 50, failed: 0
+```
+
+### Clean tutorial namespace
+
+At the end of this tutorial, you can clean up the resources by deleting the namespace:
+
+```shell
+$ kubectl delete ns kuperator-tutorial
+```
+
+## Comparison with the Native Approach
+
+Kubernetes provides `preStop` and `postStart` hooks in each container, through which users can also interact with services outside
+Kubernetes, such as the Aliyun SLB service. However, KusionStack Kuperator offers several advantages:
+
+* Pod level vs Container level
+
+Kuperator offers Pod-level hooks, which carry more complete information than a single container's hooks,
+especially when there are several containers in one Pod.
+
+* Pluggable
+
+Through KusionStack Kuperator, you can decouple operations executed before or after Pods actually change.
+For example, traffic control can be added or removed without modifying the Pod's preStop configuration. + +* Rollback option + +In case of issues, rollback becomes a viable option when using the Kuperator approach to update Pods. +Since Kuperator does not modify the Pods or their containers during the update, +if the traffic service experiences problems, there is an opportunity to cancel the update. \ No newline at end of file diff --git a/kuperator_versioned_docs/version-v0.6/started/install.md b/kuperator_versioned_docs/version-v0.6/started/install.md new file mode 100644 index 00000000..1ac6a89c --- /dev/null +++ b/kuperator_versioned_docs/version-v0.6/started/install.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 2 +--- + +# Installation + +## Install with helm +KusionStack Kuperator requires **Kubernetes version >= 1.18** +```shell +# Firstly add charts repository if you haven't do this. +$ helm repo add kusionstack https://kusionstack.github.io/charts + +# To update the kusionstack repo. +$ helm repo update kusionstack + +# Install the latest version. +$ helm install kuperator kusionstack/kuperator +``` + + +[Helm](https://github.com/helm/helm) is a tool for managing packages of pre-configured Kubernetes resources. +### Optional: chart parameters + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|-------------|----------------|----------------| +| `namespace` | namespace for Kuperator installation | `kusionstack-system` | +| `namespaceEnabled` | Whether to create the installation.namespace | `true` | +| `managerReplicas`| Replicas of Kuperator deployment | `3` | +| `image.repo` | Repository for kuperator image | `kusionstack/kuperator`| +| `image.pullPolicy`| Image pull policy for kuperator-manager container | `IfNotPresent` | +| `image.tag` | Tag for kuperator-manager image | `v0.1.0` | +| `resources.limits.cpu` | CPU resource limit of kuperator-manager container | `500m` | +| `resources.limits.memory` | Memory resource limit of kuperator-manager container | `128Mi` | +| `resources.requests.cpu` | CPU resource request of kuperator-manager container | `10m` | +| `resources.requests.memory` | Memory resource request of kuperator-manager container | `64Mi` | + +### Upgrade + +Run following command to upgrade KusionStack Kuperator to the latest version. + +```shell +# Upgrade to the latest version +$ helm upgrade kuperator kusionstack/kuperator +``` + +### Uninstall + +Run following command to uninstall KusionStack Kuperator. + +```shell +# Uninstall +$ helm uninstall kuperator +``` \ No newline at end of file diff --git a/kuperator_versioned_sidebars/version-v0.3-sidebars.json b/kuperator_versioned_sidebars/version-v0.3-sidebars.json new file mode 100644 index 00000000..f12156e2 --- /dev/null +++ b/kuperator_versioned_sidebars/version-v0.3-sidebars.json @@ -0,0 +1,8 @@ +{ + "kuperator": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/kuperator_versioned_sidebars/version-v0.4-sidebars.json b/kuperator_versioned_sidebars/version-v0.4-sidebars.json new file mode 100644 index 00000000..fb5f7e08 --- /dev/null +++ b/kuperator_versioned_sidebars/version-v0.4-sidebars.json @@ -0,0 +1,8 @@ +{ + "kuperator": [ + { + "type": "autogenerated", + "dirName": "." 
+ } + ] + } \ No newline at end of file diff --git a/kuperator_versioned_sidebars/version-v0.5-sidebars.json b/kuperator_versioned_sidebars/version-v0.5-sidebars.json new file mode 100644 index 00000000..f12156e2 --- /dev/null +++ b/kuperator_versioned_sidebars/version-v0.5-sidebars.json @@ -0,0 +1,8 @@ +{ + "kuperator": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/kuperator_versioned_sidebars/version-v0.6-sidebars.json b/kuperator_versioned_sidebars/version-v0.6-sidebars.json new file mode 100644 index 00000000..f12156e2 --- /dev/null +++ b/kuperator_versioned_sidebars/version-v0.6-sidebars.json @@ -0,0 +1,8 @@ +{ + "kuperator": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/kuperator_versions.json b/kuperator_versions.json new file mode 100644 index 00000000..2670da05 --- /dev/null +++ b/kuperator_versions.json @@ -0,0 +1,6 @@ +[ + "v0.6", + "v0.5", + "v0.4", + "v0.3" +] diff --git a/package-lock.json b/package-lock.json index 4d645f58..c4f2934d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,14 +8,17 @@ "name": "website", "version": "0.1.0", "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/preset-classic": "2.0.0-beta.17", - "@docusaurus/theme-search-algolia": "^2.0.0-beta.20", + "@cmfcmf/docusaurus-search-local": "^1.2.0", + "@docusaurus/core": "^2.4.1", + "@docusaurus/plugin-content-docs": "^2.4.1", + "@docusaurus/preset-classic": "^2.4.1", + "@docusaurus/theme-search-algolia": "^2.4.1", "@mdx-js/react": "^1.6.21", + "@node-rs/jieba": "^1.10.3", "@svgr/webpack": "^6.3.1", "clsx": "^1.1.1", "file-loader": "^6.2.0", + "flat-color-icons": "^1.1.0", "hast-util-is-element": "^1.1.0", "minimist": "^1.2.6", "nodejieba": "^2.6.0", @@ -23,9 +26,13 @@ "prism-react-renderer": "^1.2.1", "react": "^17.0.1", "react-dom": "^17.0.1", + "react-github-btn": "^1.4.0", + "react-icons": "^4.9.0", + "react-player": "^2.16.0", "remark-math": "^3.0.1", "throttle-debounce": "3.0.1", "trim": "0.0.3", + "typed.js": "^2.1.0", "url-loader": "^4.1.1" }, "devDependencies": { @@ -40,6 +47,72 @@ "@algolia/autocomplete-shared": "1.7.1" } }, + "node_modules/@algolia/autocomplete-js": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-js/-/autocomplete-js-1.17.2.tgz", + "integrity": "sha512-2UP5ZMEAtIJvnJ3qLiz3AzFjJD66n4UWsAf6mFGFXSYA/UU0LuaC8Bzrfj4CnK1d/AZyPLe+rgZXr6mQtBI8jg==", + "dependencies": { + "@algolia/autocomplete-core": "1.17.2", + "@algolia/autocomplete-preset-algolia": "1.17.2", + "@algolia/autocomplete-shared": "1.17.2", + "htm": "^3.1.1", + "preact": "^10.13.2" + }, + "peerDependencies": { + "@algolia/client-search": ">= 4.5.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-js/node_modules/@algolia/autocomplete-core": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.17.2.tgz", + "integrity": "sha512-Fi5cPV5pzEmJgTJ/KTcccJoR/v94OkBwJFyLTsmAx9jbBg5rlgoumRXQM41cgwzY1s/eBLNduUMak2KnZYofcA==", + "dependencies": { + "@algolia/autocomplete-plugin-algolia-insights": "1.17.2", + "@algolia/autocomplete-shared": "1.17.2" + } + }, + "node_modules/@algolia/autocomplete-js/node_modules/@algolia/autocomplete-preset-algolia": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.17.2.tgz", + "integrity": 
"sha512-pXOD059R1giNJkcFpPEWI20XdQevHlmuTxPisKk/XkqjOCFnMmyNq2O7AWJylkcOeb62o2Ord166tJ90vNTSvw==", + "dependencies": { + "@algolia/autocomplete-shared": "1.17.2" + }, + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-js/node_modules/@algolia/autocomplete-shared": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.2.tgz", + "integrity": "sha512-L9gmDgv2J6cXXefV4tg/xlfomd+jjbzKmoc6kcvtS2USkxowoLNvqkLRNQP8bHvX+RXXGNLJBwJj+Ul7JIpv8A==", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.17.2.tgz", + "integrity": "sha512-bgVuThYaY9NSQMHOE/GMvlEzQxFzqDH3Lbls7fWuei8iIfcBWGtRUH01m/w5LY1mAw1wv8SyZ9xwuvfdXt8XkA==", + "dependencies": { + "@algolia/autocomplete-shared": "1.17.2" + }, + "peerDependencies": { + "search-insights": ">= 1 < 3" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights/node_modules/@algolia/autocomplete-shared": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.2.tgz", + "integrity": "sha512-L9gmDgv2J6cXXefV4tg/xlfomd+jjbzKmoc6kcvtS2USkxowoLNvqkLRNQP8bHvX+RXXGNLJBwJj+Ul7JIpv8A==", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, "node_modules/@algolia/autocomplete-preset-algolia": { "version": "1.7.1", "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.7.1.tgz", @@ -57,6 +130,11 @@ "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.7.1.tgz", "integrity": "sha512-eTmGVqY3GeyBTT8IWiB2K5EuURAqhnumfktAEoHxfDY2o7vg2rSnO16ZtIG0fMgt3py28Vwgq42/bVEuaQV7pg==" }, + "node_modules/@algolia/autocomplete-theme-classic": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-theme-classic/-/autocomplete-theme-classic-1.17.2.tgz", + "integrity": "sha512-aPH4uJAl4HDnodAWg3+zWoBp+m2+5FFHvWm5qLFfr6CxgytdVfEam5bBTGsv1oCWB5YYrPvtYrh9XfTTxKqP0g==" + }, "node_modules/@algolia/cache-browser-local-storage": { "version": "4.14.2", "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.14.2.tgz", @@ -190,11 +268,12 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" @@ -238,20 +317,21 @@ } }, "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", 
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/generator": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.0.tgz", - "integrity": "sha512-S1ahxf1gZ2dpoiFgA+ohK9DIpz50bJ0CWs7Zlzb54Z4sG8qmdIrGrVqmy1sAtTVRb+9CU6U8VqT9L0Zj7hxHVg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "dependencies": { - "@babel/types": "^7.19.0", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "engines": { @@ -312,9 +392,9 @@ } }, "node_modules/@babel/helper-compilation-targets/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } @@ -371,17 +451,17 @@ } }, "node_modules/@babel/helper-define-polyfill-provider/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "engines": { "node": ">=6.9.0" } @@ -398,23 +478,23 @@ } }, "node_modules/@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dependencies": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": 
"sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -534,28 +614,28 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz", - "integrity": "sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "engines": { "node": ">=6.9.0" } @@ -596,12 +676,12 @@ } }, "node_modules/@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -609,9 +689,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.0.tgz", - "integrity": "sha512-74bEXKX2h+8rrfQUfsBfuZZHzsEs6Eql4pqy/T4Nn6Y9wNPggQOqD6z6pn5Bl8ZfysKouFZT/UXEH94ummEeQw==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", "bin": 
{ "parser": "bin/babel-parser.js" }, @@ -1314,14 +1394,6 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-modules-amd/node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, "node_modules/@babel/plugin-transform-modules-commonjs": { "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.6.tgz", @@ -1339,14 +1411,6 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-modules-commonjs/node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, "node_modules/@babel/plugin-transform-modules-systemjs": { "version": "7.19.0", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.19.0.tgz", @@ -1365,14 +1429,6 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-modules-systemjs/node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, "node_modules/@babel/plugin-transform-modules-umd": { "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz", @@ -1584,9 +1640,9 @@ } }, "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } @@ -1796,9 +1852,9 @@ } }, "node_modules/@babel/preset-env/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } @@ -1854,11 +1910,11 @@ } }, "node_modules/@babel/runtime": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.19.0.tgz", - "integrity": "sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.6.tgz", + "integrity": 
"sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==", "dependencies": { - "regenerator-runtime": "^0.13.4" + "regenerator-runtime": "^0.13.11" }, "engines": { "node": ">=6.9.0" @@ -1877,31 +1933,31 @@ } }, "node_modules/@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.0.tgz", - "integrity": "sha512-4pKpFRDh+utd2mbRC8JLnlsMUii3PMHjpL6a0SZ4NMZy7YFP9aXORxEhdMVOc9CpWtDF09IkciQLEhK7Ml7gRA==", - "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.0", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.0", - "@babel/types": "^7.19.0", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -1910,18 +1966,44 @@ } }, "node_modules/@babel/types": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.0.tgz", - "integrity": "sha512-YuGopBq3ke25BVSiS6fgF49Ul9gH1x70Bcr6bqRLjWCkcX8Hre1/5+z+IiWOIerRMSSEfGZVB9z9kyq7wVs9YA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "dependencies": { - "@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.18.6", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@cmfcmf/docusaurus-search-local": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@cmfcmf/docusaurus-search-local/-/docusaurus-search-local-1.2.0.tgz", + "integrity": "sha512-Tc0GhRBsfZAiB+f6BoPB8YCQap6JzzcDyJ0dLSCSzWQ6wdWvDlTBrHc1YqR8q8AZ+STRszL5eZpZFi5dbTCdYg==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-js": "^1.8.2", + "@algolia/autocomplete-theme-classic": "^1.8.2", + "@algolia/client-search": "^4.12.0", + "algoliasearch": "^4.12.0", + "cheerio": "^1.0.0-rc.9", + "clsx": "^1.1.1", + "lunr-languages": "^1.4.0", + "mark.js": "^8.11.1", + "tslib": 
"^2.6.3" + }, + "peerDependencies": { + "@docusaurus/core": "^2.0.0", + "nodejieba": "^2.5.0" + }, + "peerDependenciesMeta": { + "nodejieba": { + "optional": true + } + } + }, "node_modules/@colors/colors": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", @@ -2219,81 +2301,79 @@ } }, "node_modules/@docusaurus/core": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.0.0-beta.17.tgz", - "integrity": "sha512-iNdW7CsmHNOgc4PxD9BFxa+MD8+i7ln7erOBkF3FSMMPnsKUeVqsR3rr31aLmLZRlTXMITSPLxlXwtBZa3KPCw==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.3.tgz", + "integrity": "sha512-dWH5P7cgeNSIg9ufReX6gaCl/TmrGKD38Orbwuz05WPhAQtFXHd5B8Qym1TiXfvUNvwoYKkAJOJuGe8ou0Z7PA==", "dependencies": { - "@babel/core": "^7.17.5", - "@babel/generator": "^7.17.3", + "@babel/core": "^7.18.6", + "@babel/generator": "^7.18.7", "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.17.0", - "@babel/preset-env": "^7.16.11", - "@babel/preset-react": "^7.16.7", - "@babel/preset-typescript": "^7.16.7", - "@babel/runtime": "^7.17.2", - "@babel/runtime-corejs3": "^7.17.2", - "@babel/traverse": "^7.17.3", - "@docusaurus/cssnano-preset": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", + "@babel/plugin-transform-runtime": "^7.18.6", + "@babel/preset-env": "^7.18.6", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@babel/runtime": "^7.18.6", + "@babel/runtime-corejs3": "^7.18.6", + "@babel/traverse": "^7.18.8", + "@docusaurus/cssnano-preset": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "@slorber/static-site-generator-webpack-plugin": "^4.0.1", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.2.1", - "autoprefixer": "^10.4.2", - "babel-loader": "^8.2.3", - "babel-plugin-dynamic-import-node": "2.3.0", + "autoprefixer": "^10.4.7", + "babel-loader": "^8.2.5", + "babel-plugin-dynamic-import-node": "^2.3.3", "boxen": "^6.2.1", + "chalk": "^4.1.2", "chokidar": "^3.5.3", - "clean-css": "^5.2.4", - "cli-table3": "^0.6.1", + "clean-css": "^5.3.0", + "cli-table3": "^0.6.2", "combine-promises": "^1.1.0", "commander": "^5.1.0", - "copy-webpack-plugin": "^10.2.4", - "core-js": "^3.21.1", - "css-loader": "^6.6.0", - "css-minimizer-webpack-plugin": "^3.4.1", - "cssnano": "^5.0.17", - "del": "^6.0.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.23.3", + "css-loader": "^6.7.1", + "css-minimizer-webpack-plugin": "^4.0.0", + "cssnano": "^5.1.12", + "del": "^6.1.1", "detect-port": "^1.3.0", "escape-html": "^1.0.3", - "eta": "^1.12.3", + "eta": "^2.0.0", "file-loader": "^6.2.0", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "html-minifier-terser": "^6.1.0", - "html-tags": "^3.1.0", + "html-tags": "^3.2.0", "html-webpack-plugin": "^5.5.0", "import-fresh": "^3.3.0", - "is-root": "^2.1.0", "leven": "^3.1.0", "lodash": "^4.17.21", - "mini-css-extract-plugin": "^2.5.3", - "nprogress": "^0.2.0", - "postcss": "^8.4.7", - "postcss-loader": "^6.2.1", + "mini-css-extract-plugin": "^2.6.1", + "postcss": "^8.4.14", + "postcss-loader": 
"^7.0.0", "prompts": "^2.4.2", - "react-dev-utils": "^12.0.0", - "react-helmet-async": "^1.2.3", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.2.0", + "react-router": "^5.3.3", "react-router-config": "^5.1.1", - "react-router-dom": "^5.2.0", - "remark-admonitions": "^1.2.1", + "react-router-dom": "^5.3.3", "rtl-detect": "^1.0.4", - "semver": "^7.3.4", + "semver": "^7.3.7", "serve-handler": "^6.1.3", "shelljs": "^0.8.5", - "terser-webpack-plugin": "^5.3.1", - "tslib": "^2.3.1", + "terser-webpack-plugin": "^5.3.3", + "tslib": "^2.4.0", "update-notifier": "^5.1.0", "url-loader": "^4.1.1", "wait-on": "^6.0.1", - "webpack": "^5.69.1", + "webpack": "^5.73.0", "webpack-bundle-analyzer": "^4.5.0", - "webpack-dev-server": "^4.7.4", + "webpack-dev-server": "^4.9.3", "webpack-merge": "^5.8.0", "webpackbar": "^5.0.2" }, @@ -2301,33 +2381,111 @@ "docusaurus": "bin/docusaurus.mjs" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", "react-dom": "^16.8.4 || ^17.0.0" } }, + "node_modules/@docusaurus/core/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@docusaurus/core/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@docusaurus/core/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/@docusaurus/core/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/@docusaurus/core/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@docusaurus/core/node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + } + }, + "node_modules/@docusaurus/core/node_modules/supports-color": { + "version": "7.2.0", + 
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/@docusaurus/cssnano-preset": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.0.0-beta.17.tgz", - "integrity": "sha512-DoBwtLjJ9IY9/lNMHIEdo90L4NDayvU28nLgtjR2Sc6aBIMEB/3a5Ndjehnp+jZAkwcDdNASA86EkZVUyz1O1A==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.3.tgz", + "integrity": "sha512-ZvGSRCi7z9wLnZrXNPG6DmVPHdKGd8dIn9pYbEOFiYihfv4uDR3UtxogmKf+rT8ZlKFf5Lqne8E8nt08zNM8CA==", "dependencies": { - "cssnano-preset-advanced": "^5.1.12", - "postcss": "^8.4.7", - "postcss-sort-media-queries": "^4.2.1" + "cssnano-preset-advanced": "^5.3.8", + "postcss": "^8.4.14", + "postcss-sort-media-queries": "^4.2.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=16.14" } }, "node_modules/@docusaurus/logger": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.0.0-beta.17.tgz", - "integrity": "sha512-F9JDl06/VLg+ylsvnq9NpILSUeWtl0j4H2LtlLzX5gufEL4dGiCMlnUzYdHl7FSHSzYJ0A/R7vu0SYofsexC4w==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.3.tgz", + "integrity": "sha512-Zxws7r3yLufk9xM1zq9ged0YHs65mlRmtsobnFkdZTxWXdTYlWWLWdKyNKAsVC+D7zg+pv2fGbyabdOnyZOM3w==", "dependencies": { "chalk": "^4.1.2", - "tslib": "^2.3.1" + "tslib": "^2.4.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" } }, "node_modules/@docusaurus/logger/node_modules/ansi-styles": { @@ -2395,29 +2553,30 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.0.0-beta.17.tgz", - "integrity": "sha512-AhJ3GWRmjQYCyINHE595pff5tn3Rt83oGpdev5UT9uvG9lPYPC8nEmh1LI6c0ogfw7YkNznzxWSW4hyyVbYQ3A==", - "dependencies": { - "@babel/parser": "^7.17.3", - "@babel/traverse": "^7.17.3", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.3.tgz", + "integrity": "sha512-b1+fDnWtl3GiqkL0BRjYtc94FZrcDDBV1j8446+4tptB9BAOlePwG2p/pK6vGvfL53lkOsszXMghr2g67M0vCw==", + "dependencies": { + "@babel/parser": "^7.18.8", + "@babel/traverse": "^7.18.8", + "@docusaurus/logger": "2.4.3", + "@docusaurus/utils": "2.4.3", "@mdx-js/mdx": "^1.6.22", "escape-html": "^1.0.3", "file-loader": "^6.2.0", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "image-size": "^1.0.1", "mdast-util-to-string": "^2.0.0", - "remark-emoji": "^2.1.0", + "remark-emoji": "^2.2.0", "stringify-object": "^3.3.0", - "tslib": "^2.3.1", - "unist-util-visit": "^2.0.2", + "tslib": "^2.4.0", + "unified": "^9.2.2", + "unist-util-visit": "^2.0.3", "url-loader": "^4.1.1", - "webpack": "^5.69.1" + "webpack": "^5.73.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2425,44 +2584,61 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.0.0-beta.17.tgz", - "integrity": 
"sha512-Tu+8geC/wyygBudbSwvWIHEvt5RwyA7dEoE1JmPbgQtmqUxOZ9bgnfemwXpJW5mKuDiJASbN4of1DhbLqf4sPg==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.3.tgz", + "integrity": "sha512-cwkBkt1UCiduuvEAo7XZY01dJfRn7UR/75mBgOdb1hKknhrabJZ8YH+7savd/y9kLExPyrhe0QwdS9GuzsRRIA==", "dependencies": { - "@docusaurus/types": "2.0.0-beta.17", + "@docusaurus/react-loadable": "5.5.2", + "@docusaurus/types": "2.4.3", + "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", "@types/react-router-dom": "*", - "react-helmet-async": "*" + "react-helmet-async": "*", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" }, "peerDependencies": { "react": "*", "react-dom": "*" } }, + "node_modules/@docusaurus/module-type-aliases/node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" + } + }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.0.0-beta.17.tgz", - "integrity": "sha512-gcX4UR+WKT4bhF8FICBQHy+ESS9iRMeaglSboTZbA/YHGax/3EuZtcPU3dU4E/HFJeZ866wgUdbLKpIpsZOidg==", - "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "cheerio": "^1.0.0-rc.10", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.3.tgz", + "integrity": "sha512-PVhypqaA0t98zVDpOeTqWUTvRqCEjJubtfFUQ7zJNYdbYTbS/E/ytq6zbLVsN/dImvemtO/5JQgjLxsh8XLo8Q==", + "dependencies": { + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "lodash": "^4.17.21", "reading-time": "^1.5.0", - "remark-admonitions": "^1.2.1", - "tslib": "^2.3.1", + "tslib": "^2.4.0", + "unist-util-visit": "^2.0.3", "utility-types": "^3.10.0", - "webpack": "^5.69.1" + "webpack": "^5.73.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2470,27 +2646,29 @@ } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.0.0-beta.17.tgz", - "integrity": "sha512-YYrBpuRfTfE6NtENrpSHTJ7K7PZifn6j6hcuvdC0QKE+WD8pS+O2/Ws30yoyvHwLnAnfhvaderh1v9Kaa0/ANg==", - "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.3.tgz", + "integrity": 
"sha512-N7Po2LSH6UejQhzTCsvuX5NOzlC+HiXOVvofnEPj0WhMu1etpLEXE6a4aTxrtg95lQ5kf0xUIdjX9sh3d3G76A==", + "dependencies": { + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/module-type-aliases": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "@types/react-router-config": "^5.0.6", "combine-promises": "^1.1.0", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "import-fresh": "^3.3.0", "js-yaml": "^4.1.0", "lodash": "^4.17.21", - "remark-admonitions": "^1.2.1", - "tslib": "^2.3.1", + "tslib": "^2.4.0", "utility-types": "^3.10.0", - "webpack": "^5.69.1" + "webpack": "^5.73.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2498,21 +2676,21 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.0.0-beta.17.tgz", - "integrity": "sha512-d5x0mXTMJ44ojRQccmLyshYoamFOep2AnBe69osCDnwWMbD3Or3pnc2KMK9N7mVpQFnNFKbHNCLrX3Rv0uwEHA==", - "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "fs-extra": "^10.0.1", - "remark-admonitions": "^1.2.1", - "tslib": "^2.3.1", - "webpack": "^5.69.1" + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.3.tgz", + "integrity": "sha512-txtDVz7y3zGk67q0HjG0gRttVPodkHqE0bpJ+7dOaTH40CQFLSh7+aBeGnPOTl+oCPG+hxkim4SndqPqXjQ8Bg==", + "dependencies": { + "@docusaurus/core": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "fs-extra": "^10.1.0", + "tslib": "^2.4.0", + "webpack": "^5.73.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2520,18 +2698,19 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.0.0-beta.17.tgz", - "integrity": "sha512-p26fjYFRSC0esEmKo/kRrLVwXoFnzPCFDumwrImhPyqfVxbj+IKFaiXkayb2qHnyEGE/1KSDIgRF4CHt/pyhiw==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.3.tgz", + "integrity": "sha512-LkUbuq3zCmINlFb+gAd4ZvYr+bPAzMC0hwND4F7V9bZ852dCX8YoWyovVUBKq4er1XsOwSQaHmNGtObtn8Av8Q==", "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "fs-extra": "^10.0.1", + "@docusaurus/core": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "fs-extra": "^10.1.0", "react-json-view": "^1.21.3", - "tslib": "^2.3.1" + "tslib": "^2.4.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2539,16 +2718,17 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.0.0-beta.17.tgz", - "integrity": "sha512-jvgYIhggYD1W2jymqQVAAyjPJUV1xMCn70bAzaCMxriureMWzhQ/kQMVQpop0ijTMvifOxaV9yTcL1VRXev++A==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.3.tgz", + "integrity": 
"sha512-KzBV3k8lDkWOhg/oYGxlK5o9bOwX7KpPc/FTWoB+SfKhlHfhq7qcQdMi1elAaVEIop8tgK6gD1E58Q+XC6otSQ==", "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "tslib": "^2.3.1" + "@docusaurus/core": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "tslib": "^2.4.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2556,91 +2736,85 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.0.0-beta.17.tgz", - "integrity": "sha512-1pnWHtIk1Jfeqwvr8PlcPE5SODWT1gW4TI+ptmJbJ296FjjyvL/pG0AcGEJmYLY/OQc3oz0VQ0W2ognw9jmFIw==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.3.tgz", + "integrity": "sha512-5FMg0rT7sDy4i9AGsvJC71MQrqQZwgLNdDetLEGDHLfSHLvJhQbTCUGbGXknUgWXQJckcV/AILYeJy+HhxeIFA==", "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "tslib": "^2.3.1" + "@docusaurus/core": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "tslib": "^2.4.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", "react-dom": "^16.8.4 || ^17.0.0" } }, - "node_modules/@docusaurus/plugin-sitemap": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.0.0-beta.17.tgz", - "integrity": "sha512-19/PaGCsap6cjUPZPGs87yV9e1hAIyd0CTSeVV6Caega8nmOKk20FTrQGFJjZPeX8jvD9QIXcdg6BJnPxcKkaQ==", - "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "fs-extra": "^10.0.1", - "sitemap": "^7.1.1", - "tslib": "^2.3.1" + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.3.tgz", + "integrity": "sha512-1jTzp71yDGuQiX9Bi0pVp3alArV0LSnHXempvQTxwCGAEzUWWaBg4d8pocAlTpbP9aULQQqhgzrs8hgTRPOM0A==", + "dependencies": { + "@docusaurus/core": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "tslib": "^2.4.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", "react-dom": "^16.8.4 || ^17.0.0" } }, - "node_modules/@docusaurus/preset-classic": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.0.0-beta.17.tgz", - "integrity": "sha512-7YUxPEgM09aZWr25/hpDEp1gPl+1KsCPV1ZTRW43sbQ9TinPm+9AKR3rHVDa8ea8MdiS7BpqCVyK+H/eiyQrUw==", - "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/plugin-content-blog": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/plugin-content-pages": "2.0.0-beta.17", - "@docusaurus/plugin-debug": "2.0.0-beta.17", - "@docusaurus/plugin-google-analytics": "2.0.0-beta.17", - "@docusaurus/plugin-google-gtag": "2.0.0-beta.17", - "@docusaurus/plugin-sitemap": "2.0.0-beta.17", - "@docusaurus/theme-classic": "2.0.0-beta.17", - "@docusaurus/theme-common": "2.0.0-beta.17", - "@docusaurus/theme-search-algolia": "2.0.0-beta.17" + "node_modules/@docusaurus/plugin-sitemap": { + "version": 
"2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.3.tgz", + "integrity": "sha512-LRQYrK1oH1rNfr4YvWBmRzTL0LN9UAPxBbghgeFRBm5yloF6P+zv1tm2pe2hQTX/QP5bSKdnajCvfnScgKXMZQ==", + "dependencies": { + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "fs-extra": "^10.1.0", + "sitemap": "^7.1.1", + "tslib": "^2.4.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", "react-dom": "^16.8.4 || ^17.0.0" } }, - "node_modules/@docusaurus/preset-classic/node_modules/@docusaurus/theme-search-algolia": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.0.0-beta.17.tgz", - "integrity": "sha512-W12XKM7QC5Jmrec359bJ7aDp5U8DNkCxjVKsMNIs8rDunBoI/N+R35ERJ0N7Bg9ONAWO6o7VkUERQsfGqdvr9w==", - "dependencies": { - "@docsearch/react": "^3.0.0", - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/theme-common": "2.0.0-beta.17", - "@docusaurus/theme-translations": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "algoliasearch": "^4.12.1", - "algoliasearch-helper": "^3.7.0", - "clsx": "^1.1.1", - "eta": "^1.12.3", - "fs-extra": "^10.0.1", - "lodash": "^4.17.21", - "tslib": "^2.3.1", - "utility-types": "^3.10.0" + "node_modules/@docusaurus/preset-classic": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.3.tgz", + "integrity": "sha512-tRyMliepY11Ym6hB1rAFSNGwQDpmszvWYJvlK1E+md4SW8i6ylNHtpZjaYFff9Mdk3i/Pg8ItQq9P0daOJAvQw==", + "dependencies": { + "@docusaurus/core": "2.4.3", + "@docusaurus/plugin-content-blog": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/plugin-content-pages": "2.4.3", + "@docusaurus/plugin-debug": "2.4.3", + "@docusaurus/plugin-google-analytics": "2.4.3", + "@docusaurus/plugin-google-gtag": "2.4.3", + "@docusaurus/plugin-google-tag-manager": "2.4.3", + "@docusaurus/plugin-sitemap": "2.4.3", + "@docusaurus/theme-classic": "2.4.3", + "@docusaurus/theme-common": "2.4.3", + "@docusaurus/theme-search-algolia": "2.4.3", + "@docusaurus/types": "2.4.3" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2660,32 +2834,38 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.0.0-beta.17.tgz", - "integrity": "sha512-xfZ9kpgqo0lP9YO4rJj79wtiQJXU6ARo5wYy10IIwiWN+lg00scJHhkmNV431b05xIUjUr0cKeH9nqZmEsQRKg==", - "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/plugin-content-blog": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/plugin-content-pages": "2.0.0-beta.17", - "@docusaurus/theme-common": "2.0.0-beta.17", - "@docusaurus/theme-translations": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.3.tgz", + "integrity": "sha512-QKRAJPSGPfDY2yCiPMIVyr+MqwZCIV2lxNzqbyUW0YkrlmdzzP3WuQJPMGLCjWgQp/5c9kpWMvMxjhpZx1R32Q==", + "dependencies": { + 
"@docusaurus/core": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/module-type-aliases": "2.4.3", + "@docusaurus/plugin-content-blog": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/plugin-content-pages": "2.4.3", + "@docusaurus/theme-common": "2.4.3", + "@docusaurus/theme-translations": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", "@mdx-js/react": "^1.6.22", - "clsx": "^1.1.1", + "clsx": "^1.2.1", "copy-text-to-clipboard": "^3.0.1", - "infima": "0.2.0-alpha.37", + "infima": "0.2.0-alpha.43", "lodash": "^4.17.21", - "postcss": "^8.4.7", - "prism-react-renderer": "^1.2.1", - "prismjs": "^1.27.0", - "react-router-dom": "^5.2.0", - "rtlcss": "^3.3.0" + "nprogress": "^0.2.0", + "postcss": "^8.4.14", + "prism-react-renderer": "^1.3.5", + "prismjs": "^1.28.0", + "react-router-dom": "^5.3.3", + "rtlcss": "^3.5.0", + "tslib": "^2.4.0", + "utility-types": "^3.10.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2693,22 +2873,29 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.0.0-beta.17.tgz", - "integrity": "sha512-LJBDhx+Qexn1JHBqZbE4k+7lBaV1LgpE33enXf43ShB7ebhC91d5HLHhBwgt0pih4+elZU4rG+BG/roAmsNM0g==", - "dependencies": { - "@docusaurus/module-type-aliases": "2.0.0-beta.17", - "@docusaurus/plugin-content-blog": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/plugin-content-pages": "2.0.0-beta.17", - "clsx": "^1.1.1", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.3.tgz", + "integrity": "sha512-7KaDJBXKBVGXw5WOVt84FtN8czGWhM0lbyWEZXGp8AFfL6sZQfRTluFp4QriR97qwzSyOfQb+nzcDZZU4tezUw==", + "dependencies": { + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/module-type-aliases": "2.4.3", + "@docusaurus/plugin-content-blog": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/plugin-content-pages": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^1.2.1", "parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^1.3.1", - "tslib": "^2.3.1", + "prism-react-renderer": "^1.3.5", + "tslib": "^2.4.0", + "use-sync-external-store": "^1.2.0", "utility-types": "^3.10.0" }, "engines": { - "node": ">=14" + "node": ">=16.14" }, "peerDependencies": { "react": "^16.8.4 || ^17.0.0", @@ -2716,22 +2903,22 @@ } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.1.0.tgz", - "integrity": "sha512-rNBvi35VvENhucslEeVPOtbAzBdZY/9j55gdsweGV5bYoAXy4mHB6zTGjealcB4pJ6lJY4a5g75fXXMOlUqPfg==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.3.tgz", + "integrity": "sha512-jziq4f6YVUB5hZOB85ELATwnxBz/RmSLD3ksGQOLDPKVzat4pmI8tddNWtriPpxR04BNT+ZfpPUMFkNFetSW1Q==", "dependencies": { "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/plugin-content-docs": "2.1.0", - "@docusaurus/theme-common": "2.1.0", - "@docusaurus/theme-translations": "2.1.0", - "@docusaurus/utils": "2.1.0", - 
"@docusaurus/utils-validation": "2.1.0", + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/theme-common": "2.4.3", + "@docusaurus/theme-translations": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", "algoliasearch": "^4.13.1", "algoliasearch-helper": "^3.10.0", "clsx": "^1.2.1", - "eta": "^1.12.3", + "eta": "^2.0.0", "fs-extra": "^10.1.0", "lodash": "^4.17.21", "tslib": "^2.4.0", @@ -2745,140 +2932,56 @@ "react-dom": "^16.8.4 || ^17.0.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/core": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.1.0.tgz", - "integrity": "sha512-/ZJ6xmm+VB9Izbn0/s6h6289cbPy2k4iYFwWDhjiLsVqwa/Y0YBBcXvStfaHccudUC3OfP+26hMk7UCjc50J6Q==", + "node_modules/@docusaurus/theme-translations": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.3.tgz", + "integrity": "sha512-H4D+lbZbjbKNS/Zw1Lel64PioUAIT3cLYYJLUf3KkuO/oc9e0QCVhIYVtUI2SfBCF2NNdlyhBDQEEMygsCedIg==", "dependencies": { - "@babel/core": "^7.18.6", - "@babel/generator": "^7.18.7", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.18.6", - "@babel/preset-env": "^7.18.6", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.18.6", - "@babel/runtime": "^7.18.6", - "@babel/runtime-corejs3": "^7.18.6", - "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-common": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "@slorber/static-site-generator-webpack-plugin": "^4.0.7", - "@svgr/webpack": "^6.2.1", - "autoprefixer": "^10.4.7", - "babel-loader": "^8.2.5", - "babel-plugin-dynamic-import-node": "^2.3.3", - "boxen": "^6.2.1", - "chalk": "^4.1.2", - "chokidar": "^3.5.3", - "clean-css": "^5.3.0", - "cli-table3": "^0.6.2", - "combine-promises": "^1.1.0", - "commander": "^5.1.0", - "copy-webpack-plugin": "^11.0.0", - "core-js": "^3.23.3", - "css-loader": "^6.7.1", - "css-minimizer-webpack-plugin": "^4.0.0", - "cssnano": "^5.1.12", - "del": "^6.1.1", - "detect-port": "^1.3.0", - "escape-html": "^1.0.3", - "eta": "^1.12.3", - "file-loader": "^6.2.0", "fs-extra": "^10.1.0", - "html-minifier-terser": "^6.1.0", - "html-tags": "^3.2.0", - "html-webpack-plugin": "^5.5.0", - "import-fresh": "^3.3.0", - "leven": "^3.1.0", - "lodash": "^4.17.21", - "mini-css-extract-plugin": "^2.6.1", - "postcss": "^8.4.14", - "postcss-loader": "^7.0.0", - "prompts": "^2.4.2", - "react-dev-utils": "^12.0.1", - "react-helmet-async": "^1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", - "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.3.3", - "react-router-config": "^5.1.1", - "react-router-dom": "^5.3.3", - "rtl-detect": "^1.0.4", - "semver": "^7.3.7", - "serve-handler": "^6.1.3", - "shelljs": "^0.8.5", - "terser-webpack-plugin": "^5.3.3", - "tslib": "^2.4.0", - "update-notifier": "^5.1.0", - "url-loader": "^4.1.1", - "wait-on": "^6.0.1", - "webpack": "^5.73.0", - "webpack-bundle-analyzer": "^4.5.0", - "webpack-dev-server": "^4.9.3", - "webpack-merge": "^5.8.0", - "webpackbar": "^5.0.2" - }, - "bin": { - "docusaurus": "bin/docusaurus.mjs" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": 
"^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/cssnano-preset": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.1.0.tgz", - "integrity": "sha512-pRLewcgGhOies6pzsUROfmPStDRdFw+FgV5sMtLr5+4Luv2rty5+b/eSIMMetqUsmg3A9r9bcxHk9bKAKvx3zQ==", - "dependencies": { - "cssnano-preset-advanced": "^5.3.8", - "postcss": "^8.4.14", - "postcss-sort-media-queries": "^4.2.1", "tslib": "^2.4.0" }, "engines": { "node": ">=16.14" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/logger": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.1.0.tgz", - "integrity": "sha512-uuJx2T6hDBg82joFeyobywPjSOIfeq05GfyKGHThVoXuXsu1KAzMDYcjoDxarb9CoHCI/Dor8R2MoL6zII8x1Q==", + "node_modules/@docusaurus/types": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.3.tgz", + "integrity": "sha512-W6zNLGQqfrp/EoPD0bhb9n7OobP+RHpmvVzpA+Z/IuU3Q63njJM24hmT0GYboovWcDtFmnIJC9wcyx4RVPQscw==", "dependencies": { - "chalk": "^4.1.2", - "tslib": "^2.4.0" + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.6.0", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.73.0", + "webpack-merge": "^5.8.0" }, - "engines": { - "node": ">=16.14" + "peerDependencies": { + "react": "^16.8.4 || ^17.0.0", + "react-dom": "^16.8.4 || ^17.0.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/mdx-loader": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.1.0.tgz", - "integrity": "sha512-i97hi7hbQjsD3/8OSFhLy7dbKGH8ryjEzOfyhQIn2CFBYOY3ko0vMVEf3IY9nD3Ld7amYzsZ8153RPkcnXA+Lg==", + "node_modules/@docusaurus/utils": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.3.tgz", + "integrity": "sha512-fKcXsjrD86Smxv8Pt0TBFqYieZZCPh4cbf9oszUq/AMhZn3ujwpKaVYZACPX8mmjtYx0JOgNx52CREBfiGQB4A==", "dependencies": { - "@babel/parser": "^7.18.8", - "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@mdx-js/mdx": "^1.6.22", - "escape-html": "^1.0.3", + "@docusaurus/logger": "2.4.3", + "@svgr/webpack": "^6.2.1", + "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", "fs-extra": "^10.1.0", - "image-size": "^1.0.1", - "mdast-util-to-string": "^2.0.0", - "remark-emoji": "^2.2.0", - "stringify-object": "^3.3.0", + "github-slugger": "^1.4.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", "tslib": "^2.4.0", - "unified": "^9.2.2", - "unist-util-visit": "^2.0.3", "url-loader": "^4.1.1", "webpack": "^5.73.0" }, @@ -2886,238 +2989,128 @@ "node": ">=16.14" }, "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/module-type-aliases": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.1.0.tgz", - "integrity": "sha512-Z8WZaK5cis3xEtyfOT817u9xgGUauT0PuuVo85ysnFRX8n7qLN1lTPCkC+aCmFm/UcV8h/W5T4NtIsst94UntQ==", - "dependencies": { - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.1.0", - "@types/history": "^4.7.11", - "@types/react": "*", - 
"@types/react-router-config": "*", - "@types/react-router-dom": "*", - "react-helmet-async": "*", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" + "@docusaurus/types": "*" }, - "peerDependencies": { - "react": "*", - "react-dom": "*" + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/plugin-content-blog": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.1.0.tgz", - "integrity": "sha512-xEp6jlu92HMNUmyRBEeJ4mCW1s77aAEQO4Keez94cUY/Ap7G/r0Awa6xSLff7HL0Fjg8KK1bEbDy7q9voIavdg==", - "dependencies": { - "@docusaurus/core": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/types": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-common": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "cheerio": "^1.0.0-rc.12", - "feed": "^4.2.2", - "fs-extra": "^10.1.0", - "lodash": "^4.17.21", - "reading-time": "^1.5.0", - "tslib": "^2.4.0", - "unist-util-visit": "^2.0.3", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" + "node_modules/@docusaurus/utils-common": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.3.tgz", + "integrity": "sha512-/jascp4GbLQCPVmcGkPzEQjNaAk3ADVfMtudk49Ggb+131B1WDD6HqlSmDf8MxGdy7Dja2gc+StHf01kiWoTDQ==", + "dependencies": { + "tslib": "^2.4.0" }, "engines": { "node": ">=16.14" }, "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/plugin-content-docs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.1.0.tgz", - "integrity": "sha512-Rup5pqXrXlKGIC4VgwvioIhGWF7E/NNSlxv+JAxRYpik8VKlWsk9ysrdHIlpX+KJUCO9irnY21kQh2814mlp/Q==", - "dependencies": { - "@docusaurus/core": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/module-type-aliases": "2.1.0", - "@docusaurus/types": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "@types/react-router-config": "^5.0.6", - "combine-promises": "^1.1.0", - "fs-extra": "^10.1.0", - "import-fresh": "^3.3.0", + "node_modules/@docusaurus/utils-validation": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.3.tgz", + "integrity": "sha512-G2+Vt3WR5E/9drAobP+hhZQMaswRwDlp6qOMi7o7ZypB+VO7N//DZWhZEwhcRGepMDJGQEwtPv7UxtYwPL9PBw==", + "dependencies": { + "@docusaurus/logger": "2.4.3", + "@docusaurus/utils": "2.4.3", + "joi": "^17.6.0", "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "tslib": "^2.4.0", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" + "tslib": "^2.4.0" }, "engines": { "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/plugin-content-pages": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.1.0.tgz", - "integrity": "sha512-SwZdDZRlObHNKXTnFo7W2aF6U5ZqNVI55Nw2GCBryL7oKQSLeI0lsrMlMXdzn+fS7OuBTd3MJBO1T4Zpz0i/+g==", - "dependencies": { - "@docusaurus/core": "2.1.0", - 
"@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/types": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "fs-extra": "^10.1.0", - "tslib": "^2.4.0", - "webpack": "^5.73.0" - }, + "node_modules/@docusaurus/utils/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "engines": { - "node": ">=16.14" + "node": ">=10" }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/theme-common": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.1.0.tgz", - "integrity": "sha512-vT1otpVPbKux90YpZUnvknsn5zvpLf+AW1W0EDcpE9up4cDrPqfsh0QoxGHFJnobE2/qftsBFC19BneN4BH8Ag==", - "dependencies": { - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/module-type-aliases": "2.1.0", - "@docusaurus/plugin-content-blog": "2.1.0", - "@docusaurus/plugin-content-docs": "2.1.0", - "@docusaurus/plugin-content-pages": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "clsx": "^1.2.1", - "parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^1.3.5", - "tslib": "^2.4.0", - "utility-types": "^3.10.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" + "node_modules/@emnapi/core": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.2.0.tgz", + "integrity": "sha512-E7Vgw78I93we4ZWdYCb4DGAwRROGkMIXk7/y87UmANR+J6qsWusmC3gLt0H+O0KOt5e6O38U8oJamgbudrES/w==", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.0.1", + "tslib": "^2.4.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/theme-translations": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.1.0.tgz", - "integrity": "sha512-07n2akf2nqWvtJeMy3A+7oSGMuu5F673AovXVwY0aGAux1afzGCiqIFlYW3EP0CujvDJAEFSQi/Tetfh+95JNg==", + "node_modules/@emnapi/runtime": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.2.0.tgz", + "integrity": "sha512-bV21/9LQmcQeCPEg3BDFtvwL6cwiTMksYNWQQ4KOxCZikEGalWtenoZ0wCiukJINlGCIi2KXx01g4FoH/LxpzQ==", + "optional": true, "dependencies": { - "fs-extra": "^10.1.0", "tslib": "^2.4.0" - }, - "engines": { - "node": ">=16.14" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/types": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.1.0.tgz", - "integrity": "sha512-BS1ebpJZnGG6esKqsjtEC9U9qSaPylPwlO7cQ1GaIE7J/kMZI3FITnNn0otXXu7c7ZTqhb6+8dOrG6fZn6fqzQ==", + "node_modules/@emnapi/wasi-threads": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.0.1.tgz", + "integrity": "sha512-iIBu7mwkq4UQGeMEM8bLwNK962nXdhodeScX4slfQnRhEMMzvYivHhutCIk8uojvmASXXPC2WNEjwxFWk72Oqw==", + "optional": true, "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "commander": "^5.1.0", - "joi": "^17.6.0", - "react-helmet-async": "^1.3.0", - "utility-types": "^3.10.0", - "webpack": 
"^5.73.0", - "webpack-merge": "^5.8.0" - }, - "peerDependencies": { - "react": "^16.8.4 || ^17.0.0", - "react-dom": "^16.8.4 || ^17.0.0" + "tslib": "^2.4.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/utils": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.1.0.tgz", - "integrity": "sha512-fPvrfmAuC54n8MjZuG4IysaMdmvN5A/qr7iFLbSGSyDrsbP4fnui6KdZZIa/YOLIPLec8vjZ8RIITJqF18mx4A==", + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", "dependencies": { - "@docusaurus/logger": "2.1.0", - "@svgr/webpack": "^6.2.1", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "github-slugger": "^1.4.0", - "globby": "^11.1.0", - "gray-matter": "^4.0.3", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "micromatch": "^4.0.5", - "resolve-pathname": "^3.0.0", - "shelljs": "^0.8.5", - "tslib": "^2.4.0", - "url-loader": "^4.1.1", - "webpack": "^5.73.0" - }, - "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "@docusaurus/types": "*" - }, - "peerDependenciesMeta": { - "@docusaurus/types": { - "optional": true - } + "@hapi/hoek": "^9.0.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/utils-common": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.1.0.tgz", - "integrity": "sha512-F2vgmt4yRFgRQR2vyEFGTWeyAdmgKbtmu3sjHObF0tjjx/pN0Iw/c6eCopaH34E6tc9nO0nvp01pwW+/86d1fg==", + "node_modules/@jest/schemas": { + "version": "29.6.0", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.0.tgz", + "integrity": "sha512-rxLjXyJBTL4LQeJW3aKo0M/+GkCOXsO+8i9Iu7eDb6KwtP65ayoDsitrdPBtujxQ88k4wI2FNYfa6TOGwSn6cQ==", "dependencies": { - "tslib": "^2.4.0" + "@sinclair/typebox": "^0.27.8" }, "engines": { - "node": ">=16.14" - }, - "peerDependencies": { - "@docusaurus/types": "*" - }, - "peerDependenciesMeta": { - "@docusaurus/types": { - "optional": true - } + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/@docusaurus/utils-validation": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.1.0.tgz", - "integrity": "sha512-AMJzWYKL3b7FLltKtDXNLO9Y649V2BXvrnRdnW2AA+PpBnYV78zKLSCz135cuWwRj1ajNtP4onbXdlnyvCijGQ==", + "node_modules/@jest/types": { + "version": "29.6.1", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.1.tgz", + "integrity": "sha512-tPKQNMPuXgvdOn2/Lg9HNfUvjYVGolt04Hp03f5hAk878uwOLikN+JzeLY0HcVgKgFl9Hs3EIqpu3WX27XNhnw==", "dependencies": { - "@docusaurus/logger": "2.1.0", - "@docusaurus/utils": "2.1.0", - "joi": "^17.6.0", - "js-yaml": "^4.1.0", - "tslib": "^2.4.0" + "@jest/schemas": "^29.6.0", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" }, "engines": { - "node": ">=16.14" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/ansi-styles": { + "node_modules/@jest/types/node_modules/ansi-styles": { 
"version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", @@ -3131,15 +3124,7 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/chalk": { + "node_modules/@jest/types/node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", @@ -3154,7 +3139,7 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/color-convert": { + "node_modules/@jest/types/node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", @@ -3165,104 +3150,12 @@ "node": ">=7.0.0" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/color-name": { + "node_modules/@jest/types/node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/copy-webpack-plugin": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", - "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", - "dependencies": { - "fast-glob": "^3.2.11", - "glob-parent": "^6.0.1", - "globby": "^13.1.1", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.2.tgz", - "integrity": "sha512-LKSDZXToac40u8Q1PQtZihbNdTYSNMuWe+K5l+oa6KgDzSvVrHXlJy40hUP522RjAIoNLJYBJi7ow+rbFpIhHQ==", - "dependencies": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/css-minimizer-webpack-plugin": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.1.0.tgz", - "integrity": "sha512-Zd+yz4nta4GXi3pMqF6skO8kjzuCUbr62z8SLMGZZtxWxTGTLopOiabPGNDEyjHCRhnhdA1EfHmqLa2Oekjtng==", - "dependencies": { - "cssnano": "^5.1.8", - "jest-worker": 
"^27.5.1", - "postcss": "^8.4.13", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "@parcel/css": { - "optional": true - }, - "clean-css": { - "optional": true - }, - "csso": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "lightningcss": { - "optional": true - } - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/has-flag": { + "node_modules/@jest/types/node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", @@ -3270,57 +3163,7 @@ "node": ">=8" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/postcss-loader": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.0.1.tgz", - "integrity": "sha512-VRviFEyYlLjctSM93gAZtcJJ/iSkPZ79zWbN/1fSH+NisBByEiVLqpdVDrPLVSi8DX0oJo12kL/GppTBdKVXiQ==", - "dependencies": { - "cosmiconfig": "^7.0.0", - "klona": "^2.0.5", - "semver": "^7.3.7" - }, - "engines": { - "node": ">= 14.15.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "postcss": "^7.0.0 || ^8.0.1", - "webpack": "^5.0.0" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/supports-color": { + "node_modules/@jest/types/node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", @@ -3331,111 +3174,6 @@ "node": ">=8" } }, - "node_modules/@docusaurus/theme-search-algolia/node_modules/unified": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", - "integrity": 
"sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", - "dependencies": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/@docusaurus/theme-translations": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.0.0-beta.17.tgz", - "integrity": "sha512-oxCX6khjZH3lgdRCL0DH06KkUM/kDr9+lzB35+vY8rpFeQruVgRdi8ekPqG3+Wr0U/N+LMhcYE5BmCb6D0Fv2A==", - "dependencies": { - "fs-extra": "^10.0.1", - "tslib": "^2.3.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/@docusaurus/types": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.0.0-beta.17.tgz", - "integrity": "sha512-4o7TXu5sKlQpybfFFtsGUElBXwSpiXKsQyyWaRKj7DRBkvMtkDX6ITZNnZO9+EHfLbP/cfrokB8C/oO7mCQ5BQ==", - "dependencies": { - "commander": "^5.1.0", - "joi": "^17.6.0", - "querystring": "0.2.1", - "utility-types": "^3.10.0", - "webpack": "^5.69.1", - "webpack-merge": "^5.8.0" - } - }, - "node_modules/@docusaurus/utils": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.0.0-beta.17.tgz", - "integrity": "sha512-yRKGdzSc5v6M/6GyQ4omkrAHCleevwKYiIrufCJgRbOtkhYE574d8mIjjirOuA/emcyLxjh+TLtqAA5TwhIryA==", - "dependencies": { - "@docusaurus/logger": "2.0.0-beta.17", - "@svgr/webpack": "^6.0.0", - "file-loader": "^6.2.0", - "fs-extra": "^10.0.1", - "github-slugger": "^1.4.0", - "globby": "^11.0.4", - "gray-matter": "^4.0.3", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "micromatch": "^4.0.4", - "resolve-pathname": "^3.0.0", - "shelljs": "^0.8.5", - "tslib": "^2.3.1", - "url-loader": "^4.1.1", - "webpack": "^5.69.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/@docusaurus/utils-common": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.0.0-beta.17.tgz", - "integrity": "sha512-90WCVdj6zYzs7neEIS594qfLO78cUL6EVK1CsRHJgVkkGjcYlCQ1NwkyO7bOb+nIAwdJrPJRc2FBSpuEGxPD3w==", - "dependencies": { - "tslib": "^2.3.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/@docusaurus/utils-validation": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.0.0-beta.17.tgz", - "integrity": "sha512-5UjayUP16fDjgd52eSEhL7SlN9x60pIhyS+K7kt7RmpSLy42+4/bSr2pns2VlATmuaoNOO6iIFdB2jgSYJ6SGA==", - "dependencies": { - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "joi": "^17.6.0", - "tslib": "^2.3.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/@hapi/hoek": { - "version": "9.3.0", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", - "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" - }, - "node_modules/@hapi/topo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", - "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", - "dependencies": { - "@hapi/hoek": "^9.0.0" - } - }, "node_modules/@jridgewell/gen-mapping": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz", @@ -3492,12 +3230,12 @@ 
"integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.15", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz", - "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==", + "version": "0.3.20", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz", + "integrity": "sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==", "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, "node_modules/@leichtgewicht/ip-codec": { @@ -3554,82 +3292,348 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/@mdx-js/mdx/node_modules/@babel/core": { - "version": "7.12.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", - "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", - "dependencies": { - "@babel/code-frame": "^7.10.4", - "@babel/generator": "^7.12.5", - "@babel/helper-module-transforms": "^7.12.1", - "@babel/helpers": "^7.12.5", - "@babel/parser": "^7.12.7", - "@babel/template": "^7.12.7", - "@babel/traverse": "^7.12.9", - "@babel/types": "^7.12.7", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.1", - "json5": "^2.1.2", - "lodash": "^4.17.19", - "resolve": "^1.3.2", - "semver": "^5.4.1", - "source-map": "^0.5.0" - }, + "node_modules/@mdx-js/mdx/node_modules/@babel/core": { + "version": "7.12.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz", + "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==", + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.5", + "@babel/parser": "^7.12.7", + "@babel/template": "^7.12.7", + "@babel/traverse": "^7.12.9", + "@babel/types": "^7.12.7", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.1", + "json5": "^2.1.2", + "lodash": "^4.17.19", + "resolve": "^1.3.2", + "semver": "^5.4.1", + "source-map": "^0.5.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@mdx-js/mdx/node_modules/@babel/plugin-syntax-jsx": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", + "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@mdx-js/mdx/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/@mdx-js/mdx/node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": 
"sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@mdx-js/mdx/node_modules/unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "dependencies": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-1.6.22.tgz", + "integrity": "sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "react": "^16.13.1 || ^17.0.0" + } + }, + "node_modules/@mdx-js/util": { + "version": "1.6.22", + "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz", + "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@napi-rs/wasm-runtime": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.4.tgz", + "integrity": "sha512-9zESzOO5aDByvhIAsOy9TbpZ0Ur2AJbUI7UT73kcUTS2mxAMHOBaa1st/jAymNoCtvrit99kkzT1FZuXVcgfIQ==", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.1.0", + "@emnapi/runtime": "^1.1.0", + "@tybys/wasm-util": "^0.9.0" + } + }, + "node_modules/@node-rs/jieba": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba/-/jieba-1.10.3.tgz", + "integrity": "sha512-SG0CWHmhIveH6upJURgymDKLertEPYbOc5NSFIpbZWW1W2MpqgumVteQO+5YBlkmpR6jMNDPWNQyQwkB6HoeNg==", + "engines": { + "node": ">= 10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + }, + "optionalDependencies": { + "@node-rs/jieba-android-arm-eabi": "1.10.3", + "@node-rs/jieba-android-arm64": "1.10.3", + "@node-rs/jieba-darwin-arm64": "1.10.3", + "@node-rs/jieba-darwin-x64": "1.10.3", + "@node-rs/jieba-freebsd-x64": "1.10.3", + "@node-rs/jieba-linux-arm-gnueabihf": "1.10.3", + "@node-rs/jieba-linux-arm64-gnu": "1.10.3", + "@node-rs/jieba-linux-arm64-musl": "1.10.3", + "@node-rs/jieba-linux-x64-gnu": "1.10.3", + "@node-rs/jieba-linux-x64-musl": "1.10.3", + "@node-rs/jieba-wasm32-wasi": "1.10.3", + "@node-rs/jieba-win32-arm64-msvc": "1.10.3", + "@node-rs/jieba-win32-ia32-msvc": "1.10.3", + "@node-rs/jieba-win32-x64-msvc": "1.10.3" + } + }, + "node_modules/@node-rs/jieba-android-arm-eabi": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-android-arm-eabi/-/jieba-android-arm-eabi-1.10.3.tgz", + "integrity": "sha512-fuqVtaYlUKZg3cqagYFxj1DSa7ZHKXLle4iGH2kbQWg7Kw6cf7aCYBHIUZuH5sliK10M/CWccZ+SGRUwcSGfbg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-android-arm64": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-android-arm64/-/jieba-android-arm64-1.10.3.tgz", + "integrity": 
"sha512-iuZZZq5yD9lT+AgaXpFe19gtAsIecUODRLLaBFbavjgjLk5cumv38ytWjS36s/eqptwI15MQfysSYOlWtMEG5g==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-darwin-arm64": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-darwin-arm64/-/jieba-darwin-arm64-1.10.3.tgz", + "integrity": "sha512-dwPhkav1tEARskwPz91UUXL2NXy4h0lJYTuJzpGgwXxm552zBM2JJ41kjah1364j+EOq5At3NQvf5r5rH89phQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-darwin-x64": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-darwin-x64/-/jieba-darwin-x64-1.10.3.tgz", + "integrity": "sha512-kjxvV6G1baQo/2I3mELv5qGv4Q0rhd5srwXhypSxMWZFtSpNwCDsLcIOR5bvMBci6QVFfZOs6WD6DKiWVz0SlA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-freebsd-x64": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-freebsd-x64/-/jieba-freebsd-x64-1.10.3.tgz", + "integrity": "sha512-QYTsn+zlWRil+MuBeLfTK5Md4GluOf2lHnFqjrOZW2oMgNOvxB3qoLV4TUf70S/E2XHeP6PUdjCKItX8C7GQPg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-arm-gnueabihf": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm-gnueabihf/-/jieba-linux-arm-gnueabihf-1.10.3.tgz", + "integrity": "sha512-UFB43kDOvqmbRl99e3GPwaTuwJZaAvgLaMTvBkmxww4MpQH6G1k31RLzMW/S21uSQso2lj6W/Mm59gaJk2FiyA==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-arm64-gnu": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm64-gnu/-/jieba-linux-arm64-gnu-1.10.3.tgz", + "integrity": "sha512-bu++yWi10wZtnS5uLcwxzxKmHVT77NgQMK8JiQr1TWCl3Y1Th7CnEHQtxfVB489edDK8l644h1/4zSTe5fRnOQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-arm64-musl": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm64-musl/-/jieba-linux-arm64-musl-1.10.3.tgz", + "integrity": "sha512-pJh+SzrK1HaKakhdFM+ew9vXwpZqMxy9u0U7J4GT+3GvOwnAZ+KjeaHebIfgOz7ZHvp/T4YBNf8oWW4zwj3AJw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-x64-gnu": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-x64-gnu/-/jieba-linux-x64-gnu-1.10.3.tgz", + "integrity": "sha512-GF5cfvu/0wXO2fVX/XV3WYH/xEGWzMBvfqLhGiA1OA1xHIufnA1T7uU3ZXkyoNi5Bzf6dmxnwtE4CJL0nvhwjQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@node-rs/jieba-linux-x64-musl": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-x64-musl/-/jieba-linux-x64-musl-1.10.3.tgz", + "integrity": "sha512-h45HMVU/hgzQ0saXNsK9fKlGdah1i1cXZULpB5vQRlRL2ZIaGp+ULtWTogS7vkoo2K8s2l4tqakWMg9eUjIJ2A==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" + 
"node": ">= 10" } }, - "node_modules/@mdx-js/mdx/node_modules/@babel/plugin-syntax-jsx": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", - "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", + "node_modules/@node-rs/jieba-wasm32-wasi": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-wasm32-wasi/-/jieba-wasm32-wasi-1.10.3.tgz", + "integrity": "sha512-vuoQ62vVoedNGcBmIi4UWdtNBOZG8B+vDYfjx3FD6rNg6g/RgwbVjYXbOVMOQwX06Ob9CfrutICXdUGHgoxzEQ==", + "cpu": [ + "wasm32" + ], + "optional": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" + "@napi-rs/wasm-runtime": "^0.2.3" }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@mdx-js/mdx/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" + "engines": { + "node": ">=14.0.0" } }, - "node_modules/@mdx-js/mdx/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "node_modules/@node-rs/jieba-win32-arm64-msvc": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-arm64-msvc/-/jieba-win32-arm64-msvc-1.10.3.tgz", + "integrity": "sha512-B8t4dh56TZnMLBoYWDkopf1ed37Ru/iU1qiIeBkbZWXGmNBChNZUOd//eaPOFjx8m9Sfc8bkj3FBRWt/kTAhmw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], "engines": { - "node": ">=0.10.0" + "node": ">= 10" } }, - "node_modules/@mdx-js/react": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-1.6.22.tgz", - "integrity": "sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg==", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - }, - "peerDependencies": { - "react": "^16.13.1 || ^17.0.0" + "node_modules/@node-rs/jieba-win32-ia32-msvc": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-ia32-msvc/-/jieba-win32-ia32-msvc-1.10.3.tgz", + "integrity": "sha512-SKuPGZJ5T+X4jOn1S8LklOSZ6HC7UBiw0hwi2z9uqX6WgElquLjGi/xfZ2gPqffeR/5K/PUu7aqYUUPL1XonVQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" } }, - "node_modules/@mdx-js/util": { - "version": "1.6.22", - "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz", - "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "node_modules/@node-rs/jieba-win32-x64-msvc": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-x64-msvc/-/jieba-win32-x64-msvc-1.10.3.tgz", + "integrity": "sha512-j9I4+a/tf2hsLu8Sr0NhcLBVNBBQctO2mzcjemMpRa1SlEeODyic9RIyP8Ljz3YTN6MYqKh1KA9iR1xvxjxYFg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" } }, "node_modules/@nodelib/fs.scandir": { @@ -3678,15 +3682,20 @@ } }, "node_modules/@sideway/formula": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/@sideway/formula/-/formula-3.0.0.tgz", - "integrity": "sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg==" + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" }, "node_modules/@sideway/pinpoint": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, "node_modules/@sindresorhus/is": { "version": "0.14.0", "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", @@ -3996,6 +4005,15 @@ "dev": true, "optional": true }, + "node_modules/@tybys/wasm-util": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.9.0.tgz", + "integrity": "sha512-6+7nlbMVX/PVDCwaIQ8nTOPveOcFLSt8GcXdx8hD0bt39uWxYT88uXzqTd4fTvqta7oeUJqudepapKNt2DYJFw==", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@types/body-parser": { "version": "1.19.2", "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", @@ -4075,11 +4093,11 @@ } }, "node_modules/@types/hast": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.4.tgz", - "integrity": "sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g==", + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", + "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", "dependencies": { - "@types/unist": "*" + "@types/unist": "^2" } }, "node_modules/@types/history": { @@ -4100,17 +4118,38 @@ "@types/node": "*" } }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", + "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, "node_modules/@types/json-schema": { "version": "7.0.11", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==" }, "node_modules/@types/mdast": { - "version": "3.0.10", - "resolved": 
"https://registry.npmjs.org/@types/mdast/-/mdast-3.0.10.tgz", - "integrity": "sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA==", + "version": "3.0.15", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.15.tgz", + "integrity": "sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==", "dependencies": { - "@types/unist": "*" + "@types/unist": "^2" } }, "node_modules/@types/mime": { @@ -4159,22 +4198,22 @@ } }, "node_modules/@types/react-router": { - "version": "5.1.19", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.19.tgz", - "integrity": "sha512-Fv/5kb2STAEMT3wHzdKQK2z8xKq38EDIGVrutYLmQVVLe+4orDFquU52hQrULnEHinMKv9FSA6lf9+uNT1ITtA==", + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*" } }, "node_modules/@types/react-router-config": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.6.tgz", - "integrity": "sha512-db1mx37a1EJDf1XeX8jJN7R3PZABmJQXR8r28yUjVMFSjkmnQo6X6pOEEmNl+Tp2gYQOGPdYbFIipBtdElZ3Yg==", + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", "dependencies": { "@types/history": "^4.7.11", "@types/react": "*", - "@types/react-router": "*" + "@types/react-router": "^5.1.0" } }, "node_modules/@types/react-router-dom": { @@ -4193,9 +4232,9 @@ "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" }, "node_modules/@types/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-pSAff4IAxJjfAXUG6tFkO7dsSbTmf8CtUpfhhZ5VhkRpC4628tJhh3+V6H1E+/Gs9piSzYKT5yzHO5M4GG9jkw==", + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", "dependencies": { "@types/node": "*" } @@ -4243,6 +4282,19 @@ "@types/node": "*" } }, + "node_modules/@types/yargs": { + "version": "17.0.24", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", + "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.0", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", + "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==" + }, "node_modules/@webassemblyjs/ast": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", @@ -4701,9 +4753,9 @@ } }, "node_modules/autoprefixer": { - "version": "10.4.10", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.10.tgz", - "integrity": "sha512-nMaiDARyp1e74c8IeAXkr+BmFKa8By4Zak7tyaNPF09Iu39WFpNXOWrVirmXjKr+5cOyERwvtbMOLYz6iBJYgQ==", + "version": "10.4.14", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.14.tgz", + 
"integrity": "sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==", "funding": [ { "type": "opencollective", @@ -4715,8 +4767,8 @@ } ], "dependencies": { - "browserslist": "^4.21.3", - "caniuse-lite": "^1.0.30001399", + "browserslist": "^4.21.5", + "caniuse-lite": "^1.0.30001464", "fraction.js": "^4.2.0", "normalize-range": "^0.1.2", "picocolors": "^1.0.0", @@ -4780,9 +4832,9 @@ "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" }, "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz", - "integrity": "sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", "dependencies": { "object.assign": "^4.1.0" } @@ -4818,9 +4870,9 @@ } }, "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } @@ -4920,20 +4972,20 @@ } }, "node_modules/body-parser": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz", - "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.10.3", - "raw-body": "2.5.1", + "qs": "6.11.0", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -5074,20 +5126,20 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.21.3", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.3.tgz", - "integrity": "sha512-898rgRXLAyRkM1GryrrBHGkqA5hlpkV5MhtZwg9QXeiyLUYs2k00Un05aX5l2/yJIOObYKOpS2JNo8nJDE7fWQ==", + "version": "4.21.9", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.9.tgz", + "integrity": 
"sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==", "funding": [ { "type": "opencollective", @@ -5096,13 +5148,17 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "caniuse-lite": "^1.0.30001370", - "electron-to-chromium": "^1.4.202", - "node-releases": "^2.0.6", - "update-browserslist-db": "^1.0.5" + "caniuse-lite": "^1.0.30001503", + "electron-to-chromium": "^1.4.431", + "node-releases": "^2.0.12", + "update-browserslist-db": "^1.0.11" }, "bin": { "browserslist": "cli.js" @@ -5205,12 +5261,18 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -5264,9 +5326,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001399", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001399.tgz", - "integrity": "sha512-4vQ90tMKS+FkvuVWS5/QY1+d805ODxZiKFzsU8o/RsVJz49ZSRR8EjykLJbqhzdPgadbX6wB538wOzle3JniRA==", + "version": "1.0.30001628", + "resolved": "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001628.tgz", + "integrity": "sha512-S3BnR4Kh26TBxbi5t5kpbcUlLJb9lhtDXISDPwOfI+JoC+ik0QksvkZtUVyikw3hjnkgkMPSJ8oIM9yMm9vflA==", "funding": [ { "type": "opencollective", @@ -5275,6 +5337,10 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ] }, @@ -5459,9 +5525,9 @@ } }, "node_modules/cli-spinners": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.7.0.tgz", - "integrity": "sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw==", + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.0.tgz", + "integrity": "sha512-4/aL9X3Wh0yiMQlE+eeRhWP6vclO3QRtw1JHKIT0FFUs5FjpFmESqtMvYZ0+lbzBw900b95mS0hohy+qn2VK/g==", "dev": true, "engines": { "node": ">=6" @@ -5618,9 +5684,9 @@ } }, "node_modules/commitizen": { - "version": "4.2.5", - "resolved": "https://registry.npmjs.org/commitizen/-/commitizen-4.2.5.tgz", - "integrity": "sha512-9sXju8Qrz1B4Tw7kC5KhnvwYQN88qs2zbiB8oyMsnXZyJ24PPGiNM3nHr73d32dnE3i8VJEXddBFIbOgYSEXtQ==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/commitizen/-/commitizen-4.3.0.tgz", + "integrity": "sha512-H0iNtClNEhT0fotHvGV3E9tDejDeS04sN1veIebsKYGMuGscFaswRoYJKmT3eW85eIJAs0F28bG2+a/9wCOfPw==", "dev": true, "dependencies": { "cachedir": "2.3.0", @@ -5631,10 +5697,10 @@ "find-root": "1.1.0", "fs-extra": "9.1.0", "glob": "7.2.3", - "inquirer": "8.2.4", + "inquirer": "8.2.5", "is-utf8": "^0.2.1", "lodash": "4.17.21", - "minimist": "1.2.6", + "minimist": "1.2.7", "strip-bom": "4.0.0", "strip-json-comments": 
"3.1.1" }, @@ -5764,9 +5830,9 @@ } }, "node_modules/content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", "engines": { "node": ">= 0.6" } @@ -5786,9 +5852,9 @@ } }, "node_modules/cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", "engines": { "node": ">= 0.6" } @@ -5799,9 +5865,9 @@ "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" }, "node_modules/copy-text-to-clipboard": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.0.1.tgz", - "integrity": "sha512-rvVsHrpFcL4F2P8ihsoLdFHmd404+CMg71S756oRSeQgqk51U3kicGdnvfkrxva0xXH92SjGS62B0XIJsbh+9Q==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", "engines": { "node": ">=12" }, @@ -5810,19 +5876,19 @@ } }, "node_modules/copy-webpack-plugin": { - "version": "10.2.4", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-10.2.4.tgz", - "integrity": "sha512-xFVltahqlsRcyyJqQbDY6EYTtyQZF9rf+JPjwHObLdPFMEISqkFkr7mFoVOC6BfYS/dNThyoQKvziugm+OnwBg==", + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", "dependencies": { - "fast-glob": "^3.2.7", + "fast-glob": "^3.2.11", "glob-parent": "^6.0.1", - "globby": "^12.0.2", + "globby": "^13.1.1", "normalize-path": "^3.0.0", "schema-utils": "^4.0.0", "serialize-javascript": "^6.0.0" }, "engines": { - "node": ">= 12.20.0" + "node": ">= 14.15.0" }, "funding": { "type": "opencollective", @@ -5832,17 +5898,6 @@ "webpack": "^5.1.0" } }, - "node_modules/copy-webpack-plugin/node_modules/array-union": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-3.0.1.tgz", - "integrity": "sha512-1OvF9IbWwaeiM9VhzYXVQacMibxpXOMYVNIvMtKRyX9SImBXpKcFr8XvFDeEslCyuH/t6KRt7HEO94AlP8Iatw==", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/copy-webpack-plugin/node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -5855,14 +5910,13 @@ } }, "node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "12.2.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-12.2.0.tgz", - "integrity": "sha512-wiSuFQLZ+urS9x2gGPl1H5drc5twabmm4m2gTR27XDFyjUHJUNsS8o/2aKyIF6IoBaR630atdher0XJ5g6OMmA==", + "version": "13.2.2", + "resolved": 
"https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", "dependencies": { - "array-union": "^3.0.1", "dir-glob": "^3.0.1", - "fast-glob": "^3.2.7", - "ignore": "^5.1.9", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", "merge2": "^1.4.1", "slash": "^4.0.0" }, @@ -5874,14 +5928,14 @@ } }, "node_modules/copy-webpack-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", "dependencies": { "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", + "ajv": "^8.9.0", "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" + "ajv-keywords": "^5.1.0" }, "engines": { "node": ">= 12.13.0" @@ -5979,11 +6033,11 @@ "optional": true }, "node_modules/cross-fetch": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", - "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", "dependencies": { - "node-fetch": "2.6.7" + "node-fetch": "^2.6.12" } }, "node_modules/cross-spawn": { @@ -6044,19 +6098,19 @@ } }, "node_modules/css-minimizer-webpack-plugin": { - "version": "3.4.1", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-3.4.1.tgz", - "integrity": "sha512-1u6D71zeIfgngN2XNRJefc/hY7Ybsxd74Jm4qngIXyUEk7fss3VUzuHxLAq/R8NAba4QU9OUSaMZlbpRc7bM4Q==", + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz", + "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==", "dependencies": { - "cssnano": "^5.0.6", - "jest-worker": "^27.0.2", - "postcss": "^8.3.5", + "cssnano": "^5.1.8", + "jest-worker": "^29.1.2", + "postcss": "^8.4.17", "schema-utils": "^4.0.0", "serialize-javascript": "^6.0.0", "source-map": "^0.6.1" }, "engines": { - "node": ">= 12.13.0" + "node": ">= 14.15.0" }, "funding": { "type": "opencollective", @@ -6069,6 +6123,9 @@ "@parcel/css": { "optional": true }, + "@swc/css": { + "optional": true + }, "clean-css": { "optional": true }, @@ -6077,18 +6134,43 @@ }, "esbuild": { "optional": true + }, + "lightningcss": { + "optional": true } } }, - "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": { + "node_modules/css-minimizer-webpack-plugin/node_modules/has-flag": { "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/jest-worker": { + 
"version": "29.6.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.1.tgz", + "integrity": "sha512-U+Wrbca7S8ZAxAe9L6nb6g8kPdia5hj32Puu5iOqBCMTMWFHXuK6dOV2IFrpedbTV8fjMFLdWNttQTBL6u2MRA==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.6.1", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", "dependencies": { "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", + "ajv": "^8.9.0", "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" + "ajv-keywords": "^5.1.0" }, "engines": { "node": ">= 12.13.0" @@ -6098,6 +6180,20 @@ "url": "https://opencollective.com/webpack" } }, + "node_modules/css-minimizer-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, "node_modules/css-select": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", @@ -6168,12 +6264,12 @@ } }, "node_modules/cssnano-preset-advanced": { - "version": "5.3.8", - "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.8.tgz", - "integrity": "sha512-xUlLLnEB1LjpEik+zgRNlk8Y/koBPPtONZjp7JKbXigeAmCrFvq9H0pXW5jJV45bQWAlmJ0sKy+IMr0XxLYQZg==", + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", + "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", "dependencies": { - "autoprefixer": "^10.3.7", - "cssnano-preset-default": "^5.2.12", + "autoprefixer": "^10.4.12", + "cssnano-preset-default": "^5.2.14", "postcss-discard-unused": "^5.1.0", "postcss-merge-idents": "^5.1.1", "postcss-reduce-idents": "^5.2.0", @@ -6187,24 +6283,24 @@ } }, "node_modules/cssnano-preset-default": { - "version": "5.2.12", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.12.tgz", - "integrity": "sha512-OyCBTZi+PXgylz9HAA5kHyoYhfGcYdwFmyaJzWnzxuGRtnMw/kR6ilW9XzlzlRAtB6PLT/r+prYgkef7hngFew==", + "version": "5.2.14", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", + "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", "dependencies": { - "css-declaration-sorter": "^6.3.0", + "css-declaration-sorter": "^6.3.1", "cssnano-utils": "^3.1.0", "postcss-calc": "^8.2.3", - "postcss-colormin": "^5.3.0", - "postcss-convert-values": "^5.1.2", + "postcss-colormin": "^5.3.1", + "postcss-convert-values": "^5.1.3", "postcss-discard-comments": "^5.1.2", "postcss-discard-duplicates": "^5.1.0", "postcss-discard-empty": "^5.1.1", "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.6", - "postcss-merge-rules": "^5.1.2", + "postcss-merge-longhand": "^5.1.7", + "postcss-merge-rules": "^5.1.4", 
"postcss-minify-font-values": "^5.1.0", "postcss-minify-gradients": "^5.1.1", - "postcss-minify-params": "^5.1.3", + "postcss-minify-params": "^5.1.4", "postcss-minify-selectors": "^5.2.1", "postcss-normalize-charset": "^5.1.0", "postcss-normalize-display-values": "^5.1.0", @@ -6212,11 +6308,11 @@ "postcss-normalize-repeat-style": "^5.1.1", "postcss-normalize-string": "^5.1.0", "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.0", + "postcss-normalize-unicode": "^5.1.1", "postcss-normalize-url": "^5.1.0", "postcss-normalize-whitespace": "^5.1.1", "postcss-ordered-values": "^5.1.3", - "postcss-reduce-initial": "^5.1.0", + "postcss-reduce-initial": "^5.1.2", "postcss-reduce-transforms": "^5.1.0", "postcss-svgo": "^5.1.0", "postcss-unique-selectors": "^5.1.1" @@ -6336,12 +6432,15 @@ } }, "node_modules/defaults": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", - "integrity": "sha512-s82itHOnYrN0Ib8r+z7laQz3sdE+4FP3d9Q7VLO7U+KRT+CR0GsWuyHxzdAY82I7cXv0G/twrqomTJLOssO5HA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", "dev": true, "dependencies": { "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/defer-to-connect": { @@ -6349,6 +6448,22 @@ "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/define-lazy-prop": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", @@ -6661,9 +6776,9 @@ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" }, "node_modules/electron-to-chromium": { - "version": "1.4.249", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.249.tgz", - "integrity": "sha512-GMCxR3p2HQvIw47A599crTKYZprqihoBL4lDSAUmr7IYekXFK5t/WgEBrGJDCa2HWIZFQEkGuMqPCi05ceYqPQ==" + "version": "1.4.459", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.459.tgz", + "integrity": "sha512-XXRS5NFv8nCrBL74Rm3qhJjA2VCsRFx0OjHKBMPI0otij56aun8UWiKTDABmd5/7GTR021pA4wivs+Ri6XCElg==" }, "node_modules/emoji-regex": { "version": "9.2.2", @@ -6734,6 +6849,25 @@ "is-arrayish": "^0.2.1" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + 
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/es-module-lexer": { "version": "0.9.3", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", @@ -6828,9 +6962,9 @@ } }, "node_modules/eta": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/eta/-/eta-1.12.3.tgz", - "integrity": "sha512-qHixwbDLtekO/d51Yr4glcaUJCIjGVJyTzuqV4GPlgZo1YpgOKG+avQynErZIYrfM6JIJdtiG2Kox8tbb+DoGg==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", "engines": { "node": ">=6.0.0" }, @@ -6917,16 +7051,16 @@ } }, "node_modules/express": { - "version": "4.18.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz", - "integrity": "sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.0", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -6942,7 +7076,7 @@ "parseurl": "~1.3.3", "path-to-regexp": "0.1.7", "proxy-addr": "~2.0.7", - "qs": "6.10.3", + "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", "send": "0.18.0", @@ -7054,9 +7188,9 @@ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.0.tgz", + "integrity": "sha512-ChDuvbOypPuNjO8yIDf36x7BlZX1smcUMTTcyoIjycexOxd6DFsKsg21qVBzEmr3G7fUKIRy2/psii+CIUt7FA==", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -7109,9 +7243,9 @@ } }, "node_modules/fbjs": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.4.tgz", - "integrity": "sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ==", + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.5.tgz", + "integrity": "sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg==", "dependencies": { "cross-fetch": "^3.1.5", "fbjs-css-vars": "^1.0.0", @@ -7119,7 +7253,7 @@ "object-assign": "^4.1.0", "promise": "^7.1.1", "setimmediate": "^1.0.5", - "ua-parser-js": "^0.7.30" + "ua-parser-js": "^1.0.35" } }, "node_modules/fbjs-css-vars": { @@ -7226,9 +7360,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": 
"sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -7325,10 +7459,15 @@ "node": ">= 8" } }, + "node_modules/flat-color-icons": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/flat-color-icons/-/flat-color-icons-1.1.0.tgz", + "integrity": "sha512-duN0PycToLxktav7Asyw0hYCSxH8dvoCbeZHw0zSTw+KXg4rAH4CcLViDrGjg//nLfoZjiDV34kw8xmJJfSfqA==" + }, "node_modules/flux": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/flux/-/flux-4.0.3.tgz", - "integrity": "sha512-yKAbrp7JhZhj6uiT1FTuVMlIAT1J4jqEyBpFApi1kxpGZCvacMVc/t1pMQyotqHhAgvoE3bNvAykhCo2CLjnYw==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/flux/-/flux-4.0.4.tgz", + "integrity": "sha512-NCj3XlayA2UsapRpM7va6wU1+9rE5FIL7qoMcmxWHRzbp0yujihMBm9BBHZ1MDIk5h5o2Bl6eGiCe8rYELAmYw==", "dependencies": { "fbemitter": "^3.0.0", "fbjs": "^3.0.1" @@ -7338,9 +7477,9 @@ } }, "node_modules/follow-redirects": { - "version": "1.15.2", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", - "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", "funding": [ { "type": "individual", @@ -7616,9 +7755,12 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/gauge": { "version": "3.0.2", @@ -7666,13 +7808,18 @@ } }, "node_modules/get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -7694,10 +7841,15 @@ "node": ">=6" } }, + "node_modules/github-buttons": { + "version": "2.28.0", + "resolved": "https://registry.npmmirror.com/github-buttons/-/github-buttons-2.28.0.tgz", + "integrity": "sha512-KsCbYiA+MiHO3ytzdGvGt/GNde4GfG9BrrLxxc+ut2snBF9IAjrn2F5mNgHHEXdG/CfFIHOMV8Uxy4LNhxZwUA==" + }, "node_modules/github-slugger": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.4.0.tgz", - "integrity": "sha512-w0dzqw/nt51xMVmlaV1+JRzN+oCa1KfcgGEWhxUG16wbdA+Xnt/yoFO8Z8x/V82ZcZ0wy6ln9QDup5avbhiDhQ==" + "version": "1.5.0", + "resolved": 
"https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" }, "node_modules/glob": { "version": "7.2.3", @@ -7809,6 +7961,17 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/got": { "version": "9.6.0", "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", @@ -7908,11 +8071,22 @@ } }, "node_modules/has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dependencies": { - "get-intrinsic": "^1.1.1" + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -7942,6 +8116,17 @@ "node": ">=8" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hast-to-hyperscript": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz", @@ -8132,6 +8317,11 @@ "safe-buffer": "~5.1.0" } }, + "node_modules/htm": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/htm/-/htm-3.1.1.tgz", + "integrity": "sha512-983Vyg8NwUE7JkZ6NmOqpCZ+sh1bKv2iYTlUkzlWmA5JD2acKoxd4KVxbMmxX/85mtfdnDmTFoNKcg5DGAvxNQ==" + }, "node_modules/html-entities": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.3.tgz", @@ -8226,9 +8416,9 @@ } }, "node_modules/http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" }, "node_modules/http-deceiver": { "version": "1.2.7", @@ -8365,17 +8555,17 @@ ] }, "node_modules/ignore": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", - "integrity": 
"sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==", + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", "engines": { "node": ">= 4" } }, "node_modules/image-size": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", - "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", + "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", "dependencies": { "queue": "6.0.2" }, @@ -8383,7 +8573,7 @@ "image-size": "bin/image-size.js" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.x" } }, "node_modules/immer": { @@ -8443,9 +8633,9 @@ } }, "node_modules/infima": { - "version": "0.2.0-alpha.37", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.37.tgz", - "integrity": "sha512-4GX7Baw+/lwS4PPW/UJNY89tWSvYG1DL6baKVdpK6mC593iRgMssxNtORMTFArLPJ/A/lzsGhRmx+z6MaMxj0Q==", + "version": "0.2.0-alpha.43", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", + "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", "engines": { "node": ">=12" } @@ -8475,9 +8665,9 @@ "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" }, "node_modules/inquirer": { - "version": "8.2.4", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.4.tgz", - "integrity": "sha512-nn4F01dxU8VeKfq192IjLsxu0/OmMZ4Lg3xKAns148rCaXP6ntAoEkVYZThWjwON8AlzdZZi6oqnhNbxUG9hVg==", + "version": "8.2.5", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.5.tgz", + "integrity": "sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ==", "dev": true, "dependencies": { "ansi-escapes": "^4.2.1", @@ -8999,6 +9189,100 @@ "node": ">=0.10.0" } }, + "node_modules/jest-util": { + "version": "29.6.1", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.6.1.tgz", + "integrity": "sha512-NRFCcjc+/uO3ijUVyNOQJluf8PtGCe/W6cix36+M3cTFgiYqFOOW5MgN4JOOcvbUhcKTYVd1CvHz/LWi8d16Mg==", + "dependencies": { + "@jest/types": "^29.6.1", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/jest-util/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" 
+ } + }, + "node_modules/jest-util/node_modules/ci-info": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", + "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-util/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/jest-util/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/jest-util/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/jest-util/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", @@ -9034,6 +9318,14 @@ "url": "https://github.com/chalk/supports-color?sponsor=1" } }, + "node_modules/jiti": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.19.1.tgz", + "integrity": "sha512-oVhqoRDaBXf7sjkll95LHVS6Myyyb1zaunVwk4Z0+WPSW4gjS0pl01zYKHScTuyEhQsFxV5L4DR5r+YqSyqyyg==", + "bin": { + "jiti": "bin/jiti.js" + } + }, "node_modules/joi": { "version": "17.6.0", "resolved": "https://registry.npmjs.org/joi/-/joi-17.6.0.tgz", @@ -9089,9 +9381,9 @@ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, "node_modules/json5": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz", - "integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "bin": { "json5": "lib/cli.js" }, @@ -9134,14 +9426,6 @@ "node": ">=6" } }, - "node_modules/klona": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz", - "integrity": "sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==", - "engines": { - "node": ">= 8" - } - }, "node_modules/latest-version": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", @@ -9174,6 +9458,11 @@ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", "integrity": 
"sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" }, + "node_modules/load-script": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", + "integrity": "sha512-kPEjMFtZvwL9TaZo0uZ2ml+Ye9HUMmPwbYRJ324qF9tqMejwykJ5ggTyvzmrbBeapCAbk98BSbTeovHEEP1uCA==" + }, "node_modules/loader-runner": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", @@ -9183,9 +9472,9 @@ } }, "node_modules/loader-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz", - "integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==", + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", "dependencies": { "big.js": "^5.2.2", "emojis-list": "^3.0.0", @@ -9375,6 +9664,11 @@ "node": ">=10" } }, + "node_modules/lunr-languages": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", + "integrity": "sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==" + }, "node_modules/make-dir": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", @@ -9390,9 +9684,9 @@ } }, "node_modules/make-dir/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } @@ -9404,6 +9698,11 @@ "dev": true, "optional": true }, + "node_modules/mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==" + }, "node_modules/markdown-escapes": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz", @@ -9494,6 +9793,11 @@ "node": ">= 4.0.0" } }, + "node_modules/memoize-one": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-5.2.1.tgz", + "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==" + }, "node_modules/merge": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/merge/-/merge-2.1.1.tgz", @@ -9588,6 +9892,7 @@ "version": "0.4.1", "resolved": "https://registry.npmjs.org/mini-create-react-context/-/mini-create-react-context-0.4.1.tgz", "integrity": "sha512-YWCYEmd5CQeHGSAKrYvXgmzzkrvssZcuuQDDeqkT+PziKGMgE+0MCCtcKbROzocGBG1meBLl2FotlRwf4gAzbQ==", + "deprecated": "Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.", "dependencies": { "@babel/runtime": "^7.12.1", "tiny-warning": "^1.0.3" @@ -9650,9 +9955,12 @@ } }, "node_modules/minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/minipass": { "version": "3.3.4", @@ -9709,9 +10017,15 @@ "dev": true }, "node_modules/nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -9755,9 +10069,9 @@ } }, "node_modules/node-fetch": { - "version": "2.6.7", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", - "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.12.tgz", + "integrity": "sha512-C/fGU2E8ToujUivIO0H+tpQ6HWo4eEmchoPIoXtxCrVghxdKq+QOHqEZW7tuP3KlV3bC8FRMO5nMCC7Zm1VP6g==", "dependencies": { "whatwg-url": "^5.0.0" }, @@ -9782,9 +10096,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.6.tgz", - "integrity": "sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==" + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", + "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==" }, "node_modules/nodejieba": { "version": "2.6.0", @@ -9887,9 +10201,9 @@ } }, "node_modules/object-inspect": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", - "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==", + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -10173,9 +10487,9 @@ } }, "node_modules/package-json/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } @@ -10419,9 +10733,9 @@ } }, 
"node_modules/postcss": { - "version": "8.4.16", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.16.tgz", - "integrity": "sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", @@ -10430,10 +10744,14 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -10454,11 +10772,11 @@ } }, "node_modules/postcss-colormin": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.0.tgz", - "integrity": "sha512-WdDO4gOFG2Z8n4P8TWBpshnL3JpmNmJwdnfP2gbk2qBA8PWwOYcmjmI/t3CmMeL72a7Hkd+x/Mg9O2/0rD54Pg==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", + "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", "dependencies": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "caniuse-api": "^3.0.0", "colord": "^2.9.1", "postcss-value-parser": "^4.2.0" @@ -10471,11 +10789,11 @@ } }, "node_modules/postcss-convert-values": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.2.tgz", - "integrity": "sha512-c6Hzc4GAv95B7suy4udszX9Zy4ETyMCgFPUDtWjdFTKH1SE9eFY/jEpHSwTH1QPuwxHpWslhckUQWbNRM4ho5g==", + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", + "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", "dependencies": { - "browserslist": "^4.20.3", + "browserslist": "^4.21.4", "postcss-value-parser": "^4.2.0" }, "engines": { @@ -10544,16 +10862,16 @@ } }, "node_modules/postcss-loader": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-6.2.1.tgz", - "integrity": "sha512-WbbYpmAaKcux/P66bZ40bpWsBucjx/TTgVVzRZ9yUO8yQfVBlameJ0ZGVaPfH64hNSBh63a+ICP5nqOpBA0w+Q==", + "version": "7.3.3", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.3.tgz", + "integrity": "sha512-YgO/yhtevGO/vJePCQmTxiaEwER94LABZN0ZMT4A0vsak9TpO+RvKRs7EmJ8peIlB9xfXCsS7M8LjqncsUZ5HA==", "dependencies": { - "cosmiconfig": "^7.0.0", - "klona": "^2.0.5", - "semver": "^7.3.5" + "cosmiconfig": "^8.2.0", + "jiti": "^1.18.2", + "semver": "^7.3.8" }, "engines": { - "node": ">= 12.13.0" + "node": ">= 14.15.0" }, "funding": { "type": "opencollective", @@ -10564,6 +10882,23 @@ "webpack": "^5.0.0" } }, + "node_modules/postcss-loader/node_modules/cosmiconfig": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz", + "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==", + "dependencies": { + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + } + }, "node_modules/postcss-merge-idents": { "version": "5.1.1", "resolved": 
"https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", @@ -10580,12 +10915,12 @@ } }, "node_modules/postcss-merge-longhand": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.6.tgz", - "integrity": "sha512-6C/UGF/3T5OE2CEbOuX7iNO63dnvqhGZeUnKkDeifebY0XqkkvrctYSZurpNE902LDf2yKwwPFgotnfSoPhQiw==", + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", + "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", "dependencies": { "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.0" + "stylehacks": "^5.1.1" }, "engines": { "node": "^10 || ^12 || >=14.0" @@ -10595,11 +10930,11 @@ } }, "node_modules/postcss-merge-rules": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.2.tgz", - "integrity": "sha512-zKMUlnw+zYCWoPN6yhPjtcEdlJaMUZ0WyVcxTAmw3lkkN/NDMRkOkiuctQEoWAOvH7twaxUUdvBWl0d4+hifRQ==", + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", + "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", "dependencies": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "caniuse-api": "^3.0.0", "cssnano-utils": "^3.1.0", "postcss-selector-parser": "^6.0.5" @@ -10642,11 +10977,11 @@ } }, "node_modules/postcss-minify-params": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.3.tgz", - "integrity": "sha512-bkzpWcjykkqIujNL+EVEPOlLYi/eZ050oImVtHU7b4lFS82jPnsCb44gvC6pxaNt38Els3jWYDHTjHKf0koTgg==", + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", + "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", "dependencies": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "cssnano-utils": "^3.1.0", "postcss-value-parser": "^4.2.0" }, @@ -10808,11 +11143,11 @@ } }, "node_modules/postcss-normalize-unicode": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.0.tgz", - "integrity": "sha512-J6M3MizAAZ2dOdSjy2caayJLQT8E8K9XjLce8AUQMwOrCvjCHv24aLC/Lps1R1ylOfol5VIDMaM/Lo9NGlk1SQ==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", + "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", "dependencies": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "postcss-value-parser": "^4.2.0" }, "engines": { @@ -10881,11 +11216,11 @@ } }, "node_modules/postcss-reduce-initial": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.0.tgz", - "integrity": "sha512-5OgTUviz0aeH6MtBjHfbr57tml13PuedK/Ecg8szzd4XRMbYxH4572JFG067z+FqBIf6Zp/d+0581glkvvWMFw==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", + "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", "dependencies": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "caniuse-api": "^3.0.0" }, "engines": { @@ -10922,9 +11257,9 @@ } }, 
"node_modules/postcss-sort-media-queries": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.3.0.tgz", - "integrity": "sha512-jAl8gJM2DvuIJiI9sL1CuiHtKM4s5aEIomkU8G3LFvbP+p8i7Sz8VV63uieTgoewGqKbi+hxBTiOKJlB35upCg==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", "dependencies": { "sort-css-media-queries": "2.1.0" }, @@ -10980,6 +11315,15 @@ "postcss": "^8.2.15" } }, + "node_modules/preact": { + "version": "10.22.0", + "resolved": "https://registry.npmjs.org/preact/-/preact-10.22.0.tgz", + "integrity": "sha512-RRurnSjJPj4rp5K6XoP45Ui33ncb7e4H7WiOHVpjbkvqvA3U+N8Z6Qbo0AE6leGYBV66n8EhEaFixvIu3SkxFw==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/preact" + } + }, "node_modules/prepend-http": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", @@ -11119,9 +11463,9 @@ "integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==" }, "node_modules/qs": { - "version": "6.10.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz", - "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", "dependencies": { "side-channel": "^1.0.4" }, @@ -11132,15 +11476,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/querystring": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.1.tgz", - "integrity": "sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg==", - "deprecated": "The querystring API is considered Legacy. 
new code should use the URLSearchParams API instead.", - "engines": { - "node": ">=0.4.x" - } - }, "node_modules/queue": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", @@ -11185,9 +11520,9 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -11365,9 +11700,9 @@ } }, "node_modules/react-dev-utils/node_modules/loader-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.0.tgz", - "integrity": "sha512-HVl9ZqccQihZ7JM85dco1MvO9G+ONvxoGa9rkhzFsneGLKSUg1gJf9bWzhRhcvm2qChhWpebQhP44qxjKIUCaQ==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz", + "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==", "engines": { "node": ">= 12.13.0" } @@ -11448,6 +11783,17 @@ "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.0.tgz", "integrity": "sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==" }, + "node_modules/react-github-btn": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/react-github-btn/-/react-github-btn-1.4.0.tgz", + "integrity": "sha512-lV4FYClAfjWnBfv0iNlJUGhamDgIq6TayD0kPZED6VzHWdpcHmPfsYOZ/CFwLfPv4Zp+F4m8QKTj0oy2HjiGXg==", + "dependencies": { + "github-buttons": "^2.22.0" + }, + "peerDependencies": { + "react": ">=16.3.0" + } + }, "node_modules/react-helmet-async": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", @@ -11464,6 +11810,14 @@ "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" } }, + "node_modules/react-icons": { + "version": "4.9.0", + "resolved": "https://registry.npmmirror.com/react-icons/-/react-icons-4.9.0.tgz", + "integrity": "sha512-ijUnFr//ycebOqujtqtV9PFS7JjhWg0QU6ykURVHuL4cbofvRCf3f6GMn9+fBktEFQOIVZnuAYLZdiyadRQRFg==", + "peerDependencies": { + "react": "*" + } + }, "node_modules/react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", @@ -11490,13 +11844,12 @@ "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==" }, "node_modules/react-loadable": { - "name": "@docusaurus/react-loadable", - "version": "5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/react-loadable/-/react-loadable-5.5.0.tgz", + "integrity": "sha512-C8Aui0ZpMd4KokxRdVAm2bQtI03k2RMRNzOB+IipV3yxFTSVICv7WoUr5L9ALB5BmKO1iHgZtWM8EvYG83otdg==", + "peer": true, "dependencies": { - "@types/react": "*", - "prop-types": "^15.6.2" + "prop-types": "^15.5.0" }, "peerDependencies": { "react": "*" @@ -11517,6 +11870,21 @@ "webpack": ">=4.41.1 || 5.x" } }, + "node_modules/react-player": { + "version": "2.16.0", + "resolved": 
"https://registry.npmjs.org/react-player/-/react-player-2.16.0.tgz", + "integrity": "sha512-mAIPHfioD7yxO0GNYVFD1303QFtI3lyyQZLY229UEAp/a10cSW+hPcakg0Keq8uWJxT2OiT/4Gt+Lc9bD6bJmQ==", + "dependencies": { + "deepmerge": "^4.0.0", + "load-script": "^1.0.0", + "memoize-one": "^5.1.1", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.0.1" + }, + "peerDependencies": { + "react": ">=16.6.0" + } + }, "node_modules/react-router": { "version": "5.3.3", "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.3.tgz", @@ -11567,11 +11935,11 @@ } }, "node_modules/react-textarea-autosize": { - "version": "8.3.4", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.3.4.tgz", - "integrity": "sha512-CdtmP8Dc19xL8/R6sWvtknD/eCXkQr30dtvC4VmGInhRsfF8X/ihXCq6+9l9qbxmKRiq407/7z5fxE7cVWQNgQ==", + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", + "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", "dependencies": { - "@babel/runtime": "^7.10.2", + "@babel/runtime": "^7.20.13", "use-composed-ref": "^1.3.0", "use-latest": "^1.2.1" }, @@ -11623,25 +11991,14 @@ } }, "node_modules/recursive-readdir": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz", - "integrity": "sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", "dependencies": { - "minimatch": "3.0.4" + "minimatch": "^3.0.5" }, "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/recursive-readdir/node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" + "node": ">=6.0.0" } }, "node_modules/regenerate": { @@ -11661,9 +12018,9 @@ } }, "node_modules/regenerator-runtime": { - "version": "0.13.9", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", - "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" }, "node_modules/regenerator-transform": { "version": "0.15.0", @@ -11716,74 +12073,24 @@ "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.6.0.tgz", "integrity": "sha512-ozE883Uigtqj3bx7OhL1KNbCzGyW2NQZPl6Hs09WTvCuZD5sTI4JY58bkbQWa/Y9hxIsvJ3M8Nbf7j54IqeZbA==" }, - "node_modules/regjsparser": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.8.4.tgz", - "integrity": "sha512-J3LABycON/VNEu3abOviqGHuB/LOtOQj8SKmfP9anY5GfAVw/SPjwzSjxGjbZXIxbGfqTHtJw58C2Li/WkStmA==", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": 
"https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", - "bin": { - "jsesc": "bin/jsesc" - } - }, - "node_modules/rehype-parse": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/rehype-parse/-/rehype-parse-6.0.2.tgz", - "integrity": "sha512-0S3CpvpTAgGmnz8kiCyFLGuW5yA4OQhyNTm/nwPopZ7+PI11WnGl1TTWTGv/2hPEe/g2jRLlhVVSsoDH8waRug==", - "dependencies": { - "hast-util-from-parse5": "^5.0.0", - "parse5": "^5.0.0", - "xtend": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/rehype-parse/node_modules/hast-util-from-parse5": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-5.0.3.tgz", - "integrity": "sha512-gOc8UB99F6eWVWFtM9jUikjN7QkWxB3nY0df5Z0Zq1/Nkwl5V4hAAsl0tmwlgWl/1shlTF8DnNYLO8X6wRV9pA==", - "dependencies": { - "ccount": "^1.0.3", - "hastscript": "^5.0.0", - "property-information": "^5.0.0", - "web-namespaces": "^1.1.2", - "xtend": "^4.0.1" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/rehype-parse/node_modules/hastscript": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-5.1.2.tgz", - "integrity": "sha512-WlztFuK+Lrvi3EggsqOkQ52rKbxkXL3RwB6t5lwoa8QLMemoWfBuL43eDrwOamJyR7uKQKdmKYaBH1NZBiIRrQ==", + "node_modules/regjsparser": { + "version": "0.8.4", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.8.4.tgz", + "integrity": "sha512-J3LABycON/VNEu3abOviqGHuB/LOtOQj8SKmfP9anY5GfAVw/SPjwzSjxGjbZXIxbGfqTHtJw58C2Li/WkStmA==", "dependencies": { - "comma-separated-tokens": "^1.0.0", - "hast-util-parse-selector": "^2.0.0", - "property-information": "^5.0.0", - "space-separated-tokens": "^1.0.0" + "jsesc": "~0.5.0" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "bin": { + "regjsparser": "bin/parser" } }, - "node_modules/rehype-parse/node_modules/parse5": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz", - "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==" + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } }, "node_modules/relateurl": { "version": "0.2.7", @@ -11793,32 +12100,6 @@ "node": ">= 0.10" } }, - "node_modules/remark-admonitions": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/remark-admonitions/-/remark-admonitions-1.2.1.tgz", - "integrity": "sha512-Ji6p68VDvD+H1oS95Fdx9Ar5WA2wcDA4kwrrhVU7fGctC6+d3uiMICu7w7/2Xld+lnU7/gi+432+rRbup5S8ow==", - "dependencies": { - "rehype-parse": "^6.0.2", - "unified": "^8.4.2", - "unist-util-visit": "^2.0.1" - } - }, - "node_modules/remark-admonitions/node_modules/unified": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-8.4.2.tgz", - "integrity": "sha512-JCrmN13jI4+h9UAyKEoGcDZV+i1E7BLFuG7OsaDvTXI5P0qhHX+vZO/kOhz9jn8HGENDKbwSeB0nVOg4gVStGA==", - "dependencies": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - }, - "funding": { - "type": "opencollective", - 
"url": "https://opencollective.com/unified" - } - }, "node_modules/remark-emoji": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz", @@ -11905,6 +12186,7 @@ "version": "7.12.1", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz", "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-object-rest-spread instead.", "dependencies": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-object-rest-spread": "^7.8.0", @@ -11926,9 +12208,9 @@ } }, "node_modules/remark-mdx/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "bin": { "semver": "bin/semver" } @@ -11941,6 +12223,23 @@ "node": ">=0.10.0" } }, + "node_modules/remark-mdx/node_modules/unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "dependencies": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/remark-parse": { "version": "8.0.3", "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz", @@ -11971,7 +12270,8 @@ "node_modules/remark-parse/node_modules/trim": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz", - "integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==" + "integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==", + "deprecated": "Use String.prototype.trim() instead" }, "node_modules/remark-squeeze-paragraphs": { "version": "4.0.0", @@ -12384,9 +12684,9 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "node_modules/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==" }, "node_modules/scheduler": { "version": "0.20.2", @@ -12442,6 +12742,12 @@ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" }, + "node_modules/search-insights": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.14.0.tgz", + "integrity": 
"sha512-OLN6MsPMCghDOqlCtsIsYgtsC0pnwVTyT9Mu6A3ewOj1DxvzZF6COrn2g86E/c05xbktB0XN04m/t1Z+n+fTGw==", + "peer": true + }, "node_modules/section-matter": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", @@ -12471,9 +12777,9 @@ } }, "node_modules/semver": { - "version": "7.3.7", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz", - "integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dependencies": { "lru-cache": "^6.0.0" }, @@ -12496,9 +12802,9 @@ } }, "node_modules/semver-diff/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "bin": { "semver": "bin/semver.js" } @@ -12561,31 +12867,20 @@ } }, "node_modules/serve-handler": { - "version": "6.1.3", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.3.tgz", - "integrity": "sha512-FosMqFBNrLyeiIDvP1zgO6YoTzFYHxLDEIavhlmQ+knB2Z7l1t+kGLHkZIDN7UVWqQAmKI3D20A6F6jo3nDd4w==", + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", "dependencies": { "bytes": "3.0.0", "content-disposition": "0.5.2", "fast-url-parser": "1.1.3", "mime-types": "2.1.18", - "minimatch": "3.0.4", + "minimatch": "3.1.2", "path-is-inside": "1.0.2", "path-to-regexp": "2.2.1", "range-parser": "1.2.0" } }, - "node_modules/serve-handler/node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/serve-handler/node_modules/path-to-regexp": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", @@ -12680,6 +12975,22 @@ "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setimmediate": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", @@ -12747,13 +13058,17 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -12783,9 +13098,9 @@ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" }, "node_modules/sitemap": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", - "integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", + "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", "dependencies": { "@types/node": "^17.0.5", "@types/sax": "^1.2.1", @@ -13063,11 +13378,11 @@ } }, "node_modules/stylehacks": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.0.tgz", - "integrity": "sha512-SzLmvHQTrIWfSgljkQCw2++C9+Ne91d/6Sp92I8c5uHTcy/PgeHamwITIbBW9wnFTY/3ZfSXR9HIL6Ikqmcu6Q==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", + "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", "dependencies": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "postcss-selector-parser": "^6.0.4" }, "engines": { @@ -13204,19 +13519,27 @@ } }, "node_modules/tar": { - "version": "6.1.11", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", - "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", "dependencies": { "chownr": "^2.0.0", "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", + "minipass": "^5.0.0", "minizlib": "^2.1.1", "mkdirp": "^1.0.3", "yallist": "^4.0.0" }, "engines": { - "node": ">= 10" + "node": ">=10" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "engines": { + "node": ">=8" } }, "node_modules/tar/node_modules/mkdirp": { @@ -13427,7 +13750,8 @@ "node_modules/trim": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.3.tgz", - "integrity": "sha512-h82ywcYhHK7veeelXrCScdH7HkWfbIT1D/CgYO+nmDarz3SGNssVBMws6jU16Ga60AJCRAvPV6w6RLuNerQqjg==" + "integrity": "sha512-h82ywcYhHK7veeelXrCScdH7HkWfbIT1D/CgYO+nmDarz3SGNssVBMws6jU16Ga60AJCRAvPV6w6RLuNerQqjg==", + "deprecated": "Use String.prototype.trim() instead" }, "node_modules/trim-trailing-lines": { "version": "1.1.4", @@ -13499,9 +13823,9 @@ "optional": true }, "node_modules/tslib": { - "version": 
"2.4.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", - "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" }, "node_modules/type-fest": { "version": "2.19.0", @@ -13545,6 +13869,11 @@ "node": ">= 0.6" } }, + "node_modules/typed.js": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/typed.js/-/typed.js-2.1.0.tgz", + "integrity": "sha512-bDuXEf7YcaKN4g08NMTUM6G90XU25CK3bh6U0THC/Mod/QPKlEt9g/EjvbYB8x2Qwr2p6J6I3NrsoYaVnY6wsQ==" + }, "node_modules/typedarray-to-buffer": { "version": "3.1.5", "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", @@ -13566,9 +13895,9 @@ } }, "node_modules/ua-parser-js": { - "version": "0.7.31", - "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.31.tgz", - "integrity": "sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ==", + "version": "1.0.38", + "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.38.tgz", + "integrity": "sha512-Aq5ppTOfvrCMgAPneW1HfWj66Xi7XL+/mIy996R1/CLS/rcyJQm6QZdsKrUeivDFQ+Oc9Wyuwor8Ze8peEoUoQ==", "funding": [ { "type": "opencollective", @@ -13577,6 +13906,10 @@ { "type": "paypal", "url": "https://paypal.me/faisalman" + }, + { + "type": "github", + "url": "https://github.com/sponsors/faisalman" } ], "engines": { @@ -13633,9 +13966,9 @@ } }, "node_modules/unified": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", - "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", + "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", "dependencies": { "bail": "^1.0.0", "extend": "^3.0.0", @@ -13776,9 +14109,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.9.tgz", - "integrity": "sha512-/xsqn21EGVdXI3EXSum1Yckj3ZVZugqyOZQ/CxYPBD/R+ko9NSUScf8tFF4dOKY+2pvSSJA/S+5B8s4Zr4kyvg==", + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", + "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", "funding": [ { "type": "opencollective", @@ -13787,6 +14120,10 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { @@ -13794,7 +14131,7 @@ "picocolors": "^1.0.0" }, "bin": { - "browserslist-lint": "cli.js" + "update-browserslist-db": "cli.js" }, "peerDependencies": { "browserslist": ">= 4.21.0" @@ -14133,6 +14470,14 @@ } } }, + "node_modules/use-sync-external-store": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz", + "integrity": "sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, "node_modules/util-deprecate": { "version": "1.0.2", 
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -14286,9 +14631,9 @@ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" }, "node_modules/webpack": { - "version": "5.74.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.74.0.tgz", - "integrity": "sha512-A2InDwnhhGN4LYctJj6M1JEaGL7Luj6LOmyBHjcI8529cm5p6VXiTIW2sn6ffvEAKmveLzvu4jrihwXtPojlAA==", + "version": "5.76.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.76.1.tgz", + "integrity": "sha512-4+YIK4Abzv8172/SGqObnUjaIHjLEuUasz9EwQj/9xmPPkYJy2Mh03Q/lJfSD3YLzbxy5FeTq5Uw0323Oh6SJQ==", "dependencies": { "@types/eslint-scope": "^3.7.3", "@types/estree": "^0.0.51", @@ -14426,9 +14771,9 @@ } }, "node_modules/webpack-dev-middleware": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", @@ -14565,15 +14910,15 @@ } }, "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.8.1.tgz", - "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "engines": { "node": ">=10.0.0" }, "peerDependencies": { "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" + "utf-8-validate": ">=5.0.2" }, "peerDependenciesMeta": { "bufferutil": { @@ -14839,9 +15184,9 @@ "integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==" }, "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", "dev": true, "engines": { "node": ">=0.10.0" @@ -14916,9 +15261,9 @@ } }, "node_modules/ws": { - "version": "7.5.9", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", - "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", "engines": { "node": ">=8.3.0" }, @@ -15015,6 +15360,59 @@ "@algolia/autocomplete-shared": "1.7.1" } }, + "@algolia/autocomplete-js": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-js/-/autocomplete-js-1.17.2.tgz", + "integrity": "sha512-2UP5ZMEAtIJvnJ3qLiz3AzFjJD66n4UWsAf6mFGFXSYA/UU0LuaC8Bzrfj4CnK1d/AZyPLe+rgZXr6mQtBI8jg==", + "requires": { + "@algolia/autocomplete-core": "1.17.2", + 
"@algolia/autocomplete-preset-algolia": "1.17.2", + "@algolia/autocomplete-shared": "1.17.2", + "htm": "^3.1.1", + "preact": "^10.13.2" + }, + "dependencies": { + "@algolia/autocomplete-core": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.17.2.tgz", + "integrity": "sha512-Fi5cPV5pzEmJgTJ/KTcccJoR/v94OkBwJFyLTsmAx9jbBg5rlgoumRXQM41cgwzY1s/eBLNduUMak2KnZYofcA==", + "requires": { + "@algolia/autocomplete-plugin-algolia-insights": "1.17.2", + "@algolia/autocomplete-shared": "1.17.2" + } + }, + "@algolia/autocomplete-preset-algolia": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.17.2.tgz", + "integrity": "sha512-pXOD059R1giNJkcFpPEWI20XdQevHlmuTxPisKk/XkqjOCFnMmyNq2O7AWJylkcOeb62o2Ord166tJ90vNTSvw==", + "requires": { + "@algolia/autocomplete-shared": "1.17.2" + } + }, + "@algolia/autocomplete-shared": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.2.tgz", + "integrity": "sha512-L9gmDgv2J6cXXefV4tg/xlfomd+jjbzKmoc6kcvtS2USkxowoLNvqkLRNQP8bHvX+RXXGNLJBwJj+Ul7JIpv8A==", + "requires": {} + } + } + }, + "@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.17.2.tgz", + "integrity": "sha512-bgVuThYaY9NSQMHOE/GMvlEzQxFzqDH3Lbls7fWuei8iIfcBWGtRUH01m/w5LY1mAw1wv8SyZ9xwuvfdXt8XkA==", + "requires": { + "@algolia/autocomplete-shared": "1.17.2" + }, + "dependencies": { + "@algolia/autocomplete-shared": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.2.tgz", + "integrity": "sha512-L9gmDgv2J6cXXefV4tg/xlfomd+jjbzKmoc6kcvtS2USkxowoLNvqkLRNQP8bHvX+RXXGNLJBwJj+Ul7JIpv8A==", + "requires": {} + } + } + }, "@algolia/autocomplete-preset-algolia": { "version": "1.7.1", "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.7.1.tgz", @@ -15028,6 +15426,11 @@ "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.7.1.tgz", "integrity": "sha512-eTmGVqY3GeyBTT8IWiB2K5EuURAqhnumfktAEoHxfDY2o7vg2rSnO16ZtIG0fMgt3py28Vwgq42/bVEuaQV7pg==" }, + "@algolia/autocomplete-theme-classic": { + "version": "1.17.2", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-theme-classic/-/autocomplete-theme-classic-1.17.2.tgz", + "integrity": "sha512-aPH4uJAl4HDnodAWg3+zWoBp+m2+5FFHvWm5qLFfr6CxgytdVfEam5bBTGsv1oCWB5YYrPvtYrh9XfTTxKqP0g==" + }, "@algolia/cache-browser-local-storage": { "version": "4.14.2", "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.14.2.tgz", @@ -15158,11 +15561,12 @@ } }, "@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "requires": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" } }, "@babel/compat-data": { @@ -15193,19 +15597,20 @@ }, "dependencies": { 
"semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, "@babel/generator": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.0.tgz", - "integrity": "sha512-S1ahxf1gZ2dpoiFgA+ohK9DIpz50bJ0CWs7Zlzb54Z4sG8qmdIrGrVqmy1sAtTVRb+9CU6U8VqT9L0Zj7hxHVg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "requires": { - "@babel/types": "^7.19.0", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "dependencies": { @@ -15250,9 +15655,9 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, @@ -15293,16 +15698,16 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, "@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==" + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==" }, "@babel/helper-explode-assignable-expression": { "version": "7.18.6", @@ -15313,20 +15718,20 @@ } }, "@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "requires": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" } }, "@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": 
"sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-member-expression-to-functions": { @@ -15413,22 +15818,22 @@ } }, "@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-string-parser": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==" + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==" }, "@babel/helper-validator-identifier": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz", - "integrity": "sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==" + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==" }, "@babel/helper-validator-option": { "version": "7.18.6", @@ -15457,19 +15862,19 @@ } }, "@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" } }, "@babel/parser": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.0.tgz", - "integrity": "sha512-74bEXKX2h+8rrfQUfsBfuZZHzsEs6Eql4pqy/T4Nn6Y9wNPggQOqD6z6pn5Bl8ZfysKouFZT/UXEH94ummEeQw==" + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==" }, "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { "version": "7.18.6", @@ -15903,16 +16308,6 @@ 
"@babel/helper-module-transforms": "^7.18.6", "@babel/helper-plugin-utils": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "dependencies": { - "babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "requires": { - "object.assign": "^4.1.0" - } - } } }, "@babel/plugin-transform-modules-commonjs": { @@ -15924,16 +16319,6 @@ "@babel/helper-plugin-utils": "^7.18.6", "@babel/helper-simple-access": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "dependencies": { - "babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "requires": { - "object.assign": "^4.1.0" - } - } } }, "@babel/plugin-transform-modules-systemjs": { @@ -15946,16 +16331,6 @@ "@babel/helper-plugin-utils": "^7.19.0", "@babel/helper-validator-identifier": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "dependencies": { - "babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "requires": { - "object.assign": "^4.1.0" - } - } } }, "@babel/plugin-transform-modules-umd": { @@ -16085,9 +16460,9 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, @@ -16242,9 +16617,9 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, @@ -16284,11 +16659,11 @@ } }, "@babel/runtime": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.19.0.tgz", - "integrity": "sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.6.tgz", + "integrity": "sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==", "requires": { - "regenerator-runtime": "^0.13.4" + "regenerator-runtime": "^0.13.11" } }, "@babel/runtime-corejs3": { @@ -16301,42 +16676,58 @@ } }, "@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": 
"7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" } }, "@babel/traverse": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.0.tgz", - "integrity": "sha512-4pKpFRDh+utd2mbRC8JLnlsMUii3PMHjpL6a0SZ4NMZy7YFP9aXORxEhdMVOc9CpWtDF09IkciQLEhK7Ml7gRA==", - "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.0", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.0", - "@babel/types": "^7.19.0", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "requires": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" } }, "@babel/types": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.0.tgz", - "integrity": "sha512-YuGopBq3ke25BVSiS6fgF49Ul9gH1x70Bcr6bqRLjWCkcX8Hre1/5+z+IiWOIerRMSSEfGZVB9z9kyq7wVs9YA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "requires": { - "@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.18.6", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" } }, + "@cmfcmf/docusaurus-search-local": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@cmfcmf/docusaurus-search-local/-/docusaurus-search-local-1.2.0.tgz", + "integrity": "sha512-Tc0GhRBsfZAiB+f6BoPB8YCQap6JzzcDyJ0dLSCSzWQ6wdWvDlTBrHc1YqR8q8AZ+STRszL5eZpZFi5dbTCdYg==", + "requires": { + "@algolia/autocomplete-js": "^1.8.2", + "@algolia/autocomplete-theme-classic": "^1.8.2", + "@algolia/client-search": "^4.12.0", + "algoliasearch": "^4.12.0", + "cheerio": "^1.0.0-rc.9", + "clsx": "^1.1.1", + "lunr-languages": "^1.4.0", + "mark.js": "^8.11.1", + "tslib": "^2.6.3" + } + }, "@colors/colors": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", @@ -16561,102 +16952,155 @@ } }, "@docusaurus/core": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.0.0-beta.17.tgz", - "integrity": "sha512-iNdW7CsmHNOgc4PxD9BFxa+MD8+i7ln7erOBkF3FSMMPnsKUeVqsR3rr31aLmLZRlTXMITSPLxlXwtBZa3KPCw==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.3.tgz", + "integrity": "sha512-dWH5P7cgeNSIg9ufReX6gaCl/TmrGKD38Orbwuz05WPhAQtFXHd5B8Qym1TiXfvUNvwoYKkAJOJuGe8ou0Z7PA==", "requires": { - "@babel/core": "^7.17.5", - "@babel/generator": 
"^7.17.3", + "@babel/core": "^7.18.6", + "@babel/generator": "^7.18.7", "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.17.0", - "@babel/preset-env": "^7.16.11", - "@babel/preset-react": "^7.16.7", - "@babel/preset-typescript": "^7.16.7", - "@babel/runtime": "^7.17.2", - "@babel/runtime-corejs3": "^7.17.2", - "@babel/traverse": "^7.17.3", - "@docusaurus/cssnano-preset": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", + "@babel/plugin-transform-runtime": "^7.18.6", + "@babel/preset-env": "^7.18.6", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@babel/runtime": "^7.18.6", + "@babel/runtime-corejs3": "^7.18.6", + "@babel/traverse": "^7.18.8", + "@docusaurus/cssnano-preset": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "@slorber/static-site-generator-webpack-plugin": "^4.0.1", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.2.1", - "autoprefixer": "^10.4.2", - "babel-loader": "^8.2.3", - "babel-plugin-dynamic-import-node": "2.3.0", + "autoprefixer": "^10.4.7", + "babel-loader": "^8.2.5", + "babel-plugin-dynamic-import-node": "^2.3.3", "boxen": "^6.2.1", + "chalk": "^4.1.2", "chokidar": "^3.5.3", - "clean-css": "^5.2.4", - "cli-table3": "^0.6.1", + "clean-css": "^5.3.0", + "cli-table3": "^0.6.2", "combine-promises": "^1.1.0", "commander": "^5.1.0", - "copy-webpack-plugin": "^10.2.4", - "core-js": "^3.21.1", - "css-loader": "^6.6.0", - "css-minimizer-webpack-plugin": "^3.4.1", - "cssnano": "^5.0.17", - "del": "^6.0.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.23.3", + "css-loader": "^6.7.1", + "css-minimizer-webpack-plugin": "^4.0.0", + "cssnano": "^5.1.12", + "del": "^6.1.1", "detect-port": "^1.3.0", "escape-html": "^1.0.3", - "eta": "^1.12.3", + "eta": "^2.0.0", "file-loader": "^6.2.0", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "html-minifier-terser": "^6.1.0", - "html-tags": "^3.1.0", + "html-tags": "^3.2.0", "html-webpack-plugin": "^5.5.0", "import-fresh": "^3.3.0", - "is-root": "^2.1.0", "leven": "^3.1.0", "lodash": "^4.17.21", - "mini-css-extract-plugin": "^2.5.3", - "nprogress": "^0.2.0", - "postcss": "^8.4.7", - "postcss-loader": "^6.2.1", + "mini-css-extract-plugin": "^2.6.1", + "postcss": "^8.4.14", + "postcss-loader": "^7.0.0", "prompts": "^2.4.2", - "react-dev-utils": "^12.0.0", - "react-helmet-async": "^1.2.3", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.2.0", + "react-router": "^5.3.3", "react-router-config": "^5.1.1", - "react-router-dom": "^5.2.0", - "remark-admonitions": "^1.2.1", + "react-router-dom": "^5.3.3", "rtl-detect": "^1.0.4", - "semver": "^7.3.4", + "semver": "^7.3.7", "serve-handler": "^6.1.3", "shelljs": "^0.8.5", - "terser-webpack-plugin": "^5.3.1", - "tslib": "^2.3.1", + "terser-webpack-plugin": "^5.3.3", + "tslib": "^2.4.0", "update-notifier": "^5.1.0", "url-loader": "^4.1.1", "wait-on": "^6.0.1", - "webpack": "^5.69.1", + "webpack": "^5.73.0", "webpack-bundle-analyzer": "^4.5.0", - "webpack-dev-server": "^4.7.4", 
+ "webpack-dev-server": "^4.9.3", "webpack-merge": "^5.8.0", "webpackbar": "^5.0.2" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "react-loadable": { + "version": "npm:@docusaurus/react-loadable@5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "requires": { + "@types/react": "*", + "prop-types": "^15.6.2" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "requires": { + "has-flag": "^4.0.0" + } + } } }, "@docusaurus/cssnano-preset": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.0.0-beta.17.tgz", - "integrity": "sha512-DoBwtLjJ9IY9/lNMHIEdo90L4NDayvU28nLgtjR2Sc6aBIMEB/3a5Ndjehnp+jZAkwcDdNASA86EkZVUyz1O1A==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.3.tgz", + "integrity": "sha512-ZvGSRCi7z9wLnZrXNPG6DmVPHdKGd8dIn9pYbEOFiYihfv4uDR3UtxogmKf+rT8ZlKFf5Lqne8E8nt08zNM8CA==", "requires": { - "cssnano-preset-advanced": "^5.1.12", - "postcss": "^8.4.7", - "postcss-sort-media-queries": "^4.2.1" + "cssnano-preset-advanced": "^5.3.8", + "postcss": "^8.4.14", + "postcss-sort-media-queries": "^4.2.1", + "tslib": "^2.4.0" } }, "@docusaurus/logger": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.0.0-beta.17.tgz", - "integrity": "sha512-F9JDl06/VLg+ylsvnq9NpILSUeWtl0j4H2LtlLzX5gufEL4dGiCMlnUzYdHl7FSHSzYJ0A/R7vu0SYofsexC4w==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.3.tgz", + "integrity": "sha512-Zxws7r3yLufk9xM1zq9ged0YHs65mlRmtsobnFkdZTxWXdTYlWWLWdKyNKAsVC+D7zg+pv2fGbyabdOnyZOM3w==", "requires": { "chalk": "^4.1.2", - "tslib": "^2.3.1" + "tslib": "^2.4.0" }, "dependencies": { "ansi-styles": { @@ -16705,746 +17149,385 @@ } }, "@docusaurus/mdx-loader": { - "version": "2.0.0-beta.17", - 
"resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.0.0-beta.17.tgz", - "integrity": "sha512-AhJ3GWRmjQYCyINHE595pff5tn3Rt83oGpdev5UT9uvG9lPYPC8nEmh1LI6c0ogfw7YkNznzxWSW4hyyVbYQ3A==", - "requires": { - "@babel/parser": "^7.17.3", - "@babel/traverse": "^7.17.3", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.3.tgz", + "integrity": "sha512-b1+fDnWtl3GiqkL0BRjYtc94FZrcDDBV1j8446+4tptB9BAOlePwG2p/pK6vGvfL53lkOsszXMghr2g67M0vCw==", + "requires": { + "@babel/parser": "^7.18.8", + "@babel/traverse": "^7.18.8", + "@docusaurus/logger": "2.4.3", + "@docusaurus/utils": "2.4.3", "@mdx-js/mdx": "^1.6.22", "escape-html": "^1.0.3", "file-loader": "^6.2.0", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "image-size": "^1.0.1", "mdast-util-to-string": "^2.0.0", - "remark-emoji": "^2.1.0", + "remark-emoji": "^2.2.0", "stringify-object": "^3.3.0", - "tslib": "^2.3.1", - "unist-util-visit": "^2.0.2", + "tslib": "^2.4.0", + "unified": "^9.2.2", + "unist-util-visit": "^2.0.3", "url-loader": "^4.1.1", - "webpack": "^5.69.1" + "webpack": "^5.73.0" } }, "@docusaurus/module-type-aliases": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.0.0-beta.17.tgz", - "integrity": "sha512-Tu+8geC/wyygBudbSwvWIHEvt5RwyA7dEoE1JmPbgQtmqUxOZ9bgnfemwXpJW5mKuDiJASbN4of1DhbLqf4sPg==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.4.3.tgz", + "integrity": "sha512-cwkBkt1UCiduuvEAo7XZY01dJfRn7UR/75mBgOdb1hKknhrabJZ8YH+7savd/y9kLExPyrhe0QwdS9GuzsRRIA==", "requires": { - "@docusaurus/types": "2.0.0-beta.17", + "@docusaurus/react-loadable": "5.5.2", + "@docusaurus/types": "2.4.3", + "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", "@types/react-router-dom": "*", - "react-helmet-async": "*" + "react-helmet-async": "*", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" + }, + "dependencies": { + "react-loadable": { + "version": "npm:@docusaurus/react-loadable@5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "requires": { + "@types/react": "*", + "prop-types": "^15.6.2" + } + } } }, "@docusaurus/plugin-content-blog": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.0.0-beta.17.tgz", - "integrity": "sha512-gcX4UR+WKT4bhF8FICBQHy+ESS9iRMeaglSboTZbA/YHGax/3EuZtcPU3dU4E/HFJeZ866wgUdbLKpIpsZOidg==", - "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "cheerio": "^1.0.0-rc.10", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.4.3.tgz", + "integrity": "sha512-PVhypqaA0t98zVDpOeTqWUTvRqCEjJubtfFUQ7zJNYdbYTbS/E/ytq6zbLVsN/dImvemtO/5JQgjLxsh8XLo8Q==", + "requires": { + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + 
"@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "lodash": "^4.17.21", "reading-time": "^1.5.0", - "remark-admonitions": "^1.2.1", - "tslib": "^2.3.1", + "tslib": "^2.4.0", + "unist-util-visit": "^2.0.3", "utility-types": "^3.10.0", - "webpack": "^5.69.1" + "webpack": "^5.73.0" } }, "@docusaurus/plugin-content-docs": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.0.0-beta.17.tgz", - "integrity": "sha512-YYrBpuRfTfE6NtENrpSHTJ7K7PZifn6j6hcuvdC0QKE+WD8pS+O2/Ws30yoyvHwLnAnfhvaderh1v9Kaa0/ANg==", - "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.4.3.tgz", + "integrity": "sha512-N7Po2LSH6UejQhzTCsvuX5NOzlC+HiXOVvofnEPj0WhMu1etpLEXE6a4aTxrtg95lQ5kf0xUIdjX9sh3d3G76A==", + "requires": { + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/module-type-aliases": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "@types/react-router-config": "^5.0.6", "combine-promises": "^1.1.0", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "import-fresh": "^3.3.0", "js-yaml": "^4.1.0", "lodash": "^4.17.21", - "remark-admonitions": "^1.2.1", - "tslib": "^2.3.1", + "tslib": "^2.4.0", "utility-types": "^3.10.0", - "webpack": "^5.69.1" + "webpack": "^5.73.0" } }, "@docusaurus/plugin-content-pages": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.0.0-beta.17.tgz", - "integrity": "sha512-d5x0mXTMJ44ojRQccmLyshYoamFOep2AnBe69osCDnwWMbD3Or3pnc2KMK9N7mVpQFnNFKbHNCLrX3Rv0uwEHA==", - "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/mdx-loader": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "fs-extra": "^10.0.1", - "remark-admonitions": "^1.2.1", - "tslib": "^2.3.1", - "webpack": "^5.69.1" + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.4.3.tgz", + "integrity": "sha512-txtDVz7y3zGk67q0HjG0gRttVPodkHqE0bpJ+7dOaTH40CQFLSh7+aBeGnPOTl+oCPG+hxkim4SndqPqXjQ8Bg==", + "requires": { + "@docusaurus/core": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "fs-extra": "^10.1.0", + "tslib": "^2.4.0", + "webpack": "^5.73.0" } }, "@docusaurus/plugin-debug": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.0.0-beta.17.tgz", - "integrity": "sha512-p26fjYFRSC0esEmKo/kRrLVwXoFnzPCFDumwrImhPyqfVxbj+IKFaiXkayb2qHnyEGE/1KSDIgRF4CHt/pyhiw==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-2.4.3.tgz", + "integrity": "sha512-LkUbuq3zCmINlFb+gAd4ZvYr+bPAzMC0hwND4F7V9bZ852dCX8YoWyovVUBKq4er1XsOwSQaHmNGtObtn8Av8Q==", "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "fs-extra": "^10.0.1", + "@docusaurus/core": 
"2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "fs-extra": "^10.1.0", "react-json-view": "^1.21.3", - "tslib": "^2.3.1" + "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-analytics": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.0.0-beta.17.tgz", - "integrity": "sha512-jvgYIhggYD1W2jymqQVAAyjPJUV1xMCn70bAzaCMxriureMWzhQ/kQMVQpop0ijTMvifOxaV9yTcL1VRXev++A==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.4.3.tgz", + "integrity": "sha512-KzBV3k8lDkWOhg/oYGxlK5o9bOwX7KpPc/FTWoB+SfKhlHfhq7qcQdMi1elAaVEIop8tgK6gD1E58Q+XC6otSQ==", "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "tslib": "^2.3.1" + "@docusaurus/core": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "tslib": "^2.4.0" } }, "@docusaurus/plugin-google-gtag": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.0.0-beta.17.tgz", - "integrity": "sha512-1pnWHtIk1Jfeqwvr8PlcPE5SODWT1gW4TI+ptmJbJ296FjjyvL/pG0AcGEJmYLY/OQc3oz0VQ0W2ognw9jmFIw==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.4.3.tgz", + "integrity": "sha512-5FMg0rT7sDy4i9AGsvJC71MQrqQZwgLNdDetLEGDHLfSHLvJhQbTCUGbGXknUgWXQJckcV/AILYeJy+HhxeIFA==", "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "tslib": "^2.3.1" - } - }, - "@docusaurus/plugin-sitemap": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.0.0-beta.17.tgz", - "integrity": "sha512-19/PaGCsap6cjUPZPGs87yV9e1hAIyd0CTSeVV6Caega8nmOKk20FTrQGFJjZPeX8jvD9QIXcdg6BJnPxcKkaQ==", - "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "fs-extra": "^10.0.1", - "sitemap": "^7.1.1", - "tslib": "^2.3.1" - } - }, - "@docusaurus/preset-classic": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.0.0-beta.17.tgz", - "integrity": "sha512-7YUxPEgM09aZWr25/hpDEp1gPl+1KsCPV1ZTRW43sbQ9TinPm+9AKR3rHVDa8ea8MdiS7BpqCVyK+H/eiyQrUw==", - "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/plugin-content-blog": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/plugin-content-pages": "2.0.0-beta.17", - "@docusaurus/plugin-debug": "2.0.0-beta.17", - "@docusaurus/plugin-google-analytics": "2.0.0-beta.17", - "@docusaurus/plugin-google-gtag": "2.0.0-beta.17", - "@docusaurus/plugin-sitemap": "2.0.0-beta.17", - "@docusaurus/theme-classic": "2.0.0-beta.17", - "@docusaurus/theme-common": "2.0.0-beta.17", - "@docusaurus/theme-search-algolia": "2.0.0-beta.17" - }, - "dependencies": { - "@docusaurus/theme-search-algolia": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.0.0-beta.17.tgz", - "integrity": "sha512-W12XKM7QC5Jmrec359bJ7aDp5U8DNkCxjVKsMNIs8rDunBoI/N+R35ERJ0N7Bg9ONAWO6o7VkUERQsfGqdvr9w==", - "requires": { - "@docsearch/react": "^3.0.0", - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/theme-common": 
"2.0.0-beta.17", - "@docusaurus/theme-translations": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "algoliasearch": "^4.12.1", - "algoliasearch-helper": "^3.7.0", - "clsx": "^1.1.1", - "eta": "^1.12.3", - "fs-extra": "^10.0.1", - "lodash": "^4.17.21", - "tslib": "^2.3.1", - "utility-types": "^3.10.0" - } - } - } - }, - "@docusaurus/react-loadable": { - "version": "5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", - "requires": { - "@types/react": "*", - "prop-types": "^15.6.2" - } - }, - "@docusaurus/theme-classic": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.0.0-beta.17.tgz", - "integrity": "sha512-xfZ9kpgqo0lP9YO4rJj79wtiQJXU6ARo5wYy10IIwiWN+lg00scJHhkmNV431b05xIUjUr0cKeH9nqZmEsQRKg==", - "requires": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/plugin-content-blog": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/plugin-content-pages": "2.0.0-beta.17", - "@docusaurus/theme-common": "2.0.0-beta.17", - "@docusaurus/theme-translations": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", - "@docusaurus/utils-common": "2.0.0-beta.17", - "@docusaurus/utils-validation": "2.0.0-beta.17", - "@mdx-js/react": "^1.6.22", - "clsx": "^1.1.1", - "copy-text-to-clipboard": "^3.0.1", - "infima": "0.2.0-alpha.37", - "lodash": "^4.17.21", - "postcss": "^8.4.7", - "prism-react-renderer": "^1.2.1", - "prismjs": "^1.27.0", - "react-router-dom": "^5.2.0", - "rtlcss": "^3.3.0" - } - }, - "@docusaurus/theme-common": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.0.0-beta.17.tgz", - "integrity": "sha512-LJBDhx+Qexn1JHBqZbE4k+7lBaV1LgpE33enXf43ShB7ebhC91d5HLHhBwgt0pih4+elZU4rG+BG/roAmsNM0g==", - "requires": { - "@docusaurus/module-type-aliases": "2.0.0-beta.17", - "@docusaurus/plugin-content-blog": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/plugin-content-pages": "2.0.0-beta.17", - "clsx": "^1.1.1", - "parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^1.3.1", - "tslib": "^2.3.1", - "utility-types": "^3.10.0" - } - }, - "@docusaurus/theme-search-algolia": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.1.0.tgz", - "integrity": "sha512-rNBvi35VvENhucslEeVPOtbAzBdZY/9j55gdsweGV5bYoAXy4mHB6zTGjealcB4pJ6lJY4a5g75fXXMOlUqPfg==", - "requires": { - "@docsearch/react": "^3.1.1", - "@docusaurus/core": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/plugin-content-docs": "2.1.0", - "@docusaurus/theme-common": "2.1.0", - "@docusaurus/theme-translations": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "algoliasearch": "^4.13.1", - "algoliasearch-helper": "^3.10.0", - "clsx": "^1.2.1", - "eta": "^1.12.3", - "fs-extra": "^10.1.0", - "lodash": "^4.17.21", - "tslib": "^2.4.0", - "utility-types": "^3.10.0" - }, - "dependencies": { - "@docusaurus/core": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.1.0.tgz", - "integrity": "sha512-/ZJ6xmm+VB9Izbn0/s6h6289cbPy2k4iYFwWDhjiLsVqwa/Y0YBBcXvStfaHccudUC3OfP+26hMk7UCjc50J6Q==", - "requires": { - "@babel/core": "^7.18.6", - "@babel/generator": 
"^7.18.7", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.18.6", - "@babel/preset-env": "^7.18.6", - "@babel/preset-react": "^7.18.6", - "@babel/preset-typescript": "^7.18.6", - "@babel/runtime": "^7.18.6", - "@babel/runtime-corejs3": "^7.18.6", - "@babel/traverse": "^7.18.8", - "@docusaurus/cssnano-preset": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-common": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "@slorber/static-site-generator-webpack-plugin": "^4.0.7", - "@svgr/webpack": "^6.2.1", - "autoprefixer": "^10.4.7", - "babel-loader": "^8.2.5", - "babel-plugin-dynamic-import-node": "^2.3.3", - "boxen": "^6.2.1", - "chalk": "^4.1.2", - "chokidar": "^3.5.3", - "clean-css": "^5.3.0", - "cli-table3": "^0.6.2", - "combine-promises": "^1.1.0", - "commander": "^5.1.0", - "copy-webpack-plugin": "^11.0.0", - "core-js": "^3.23.3", - "css-loader": "^6.7.1", - "css-minimizer-webpack-plugin": "^4.0.0", - "cssnano": "^5.1.12", - "del": "^6.1.1", - "detect-port": "^1.3.0", - "escape-html": "^1.0.3", - "eta": "^1.12.3", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "html-minifier-terser": "^6.1.0", - "html-tags": "^3.2.0", - "html-webpack-plugin": "^5.5.0", - "import-fresh": "^3.3.0", - "leven": "^3.1.0", - "lodash": "^4.17.21", - "mini-css-extract-plugin": "^2.6.1", - "postcss": "^8.4.14", - "postcss-loader": "^7.0.0", - "prompts": "^2.4.2", - "react-dev-utils": "^12.0.1", - "react-helmet-async": "^1.3.0", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", - "react-loadable-ssr-addon-v5-slorber": "^1.0.1", - "react-router": "^5.3.3", - "react-router-config": "^5.1.1", - "react-router-dom": "^5.3.3", - "rtl-detect": "^1.0.4", - "semver": "^7.3.7", - "serve-handler": "^6.1.3", - "shelljs": "^0.8.5", - "terser-webpack-plugin": "^5.3.3", - "tslib": "^2.4.0", - "update-notifier": "^5.1.0", - "url-loader": "^4.1.1", - "wait-on": "^6.0.1", - "webpack": "^5.73.0", - "webpack-bundle-analyzer": "^4.5.0", - "webpack-dev-server": "^4.9.3", - "webpack-merge": "^5.8.0", - "webpackbar": "^5.0.2" - } - }, - "@docusaurus/cssnano-preset": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.1.0.tgz", - "integrity": "sha512-pRLewcgGhOies6pzsUROfmPStDRdFw+FgV5sMtLr5+4Luv2rty5+b/eSIMMetqUsmg3A9r9bcxHk9bKAKvx3zQ==", - "requires": { - "cssnano-preset-advanced": "^5.3.8", - "postcss": "^8.4.14", - "postcss-sort-media-queries": "^4.2.1", - "tslib": "^2.4.0" - } - }, - "@docusaurus/logger": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.1.0.tgz", - "integrity": "sha512-uuJx2T6hDBg82joFeyobywPjSOIfeq05GfyKGHThVoXuXsu1KAzMDYcjoDxarb9CoHCI/Dor8R2MoL6zII8x1Q==", - "requires": { - "chalk": "^4.1.2", - "tslib": "^2.4.0" - } - }, - "@docusaurus/mdx-loader": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.1.0.tgz", - "integrity": "sha512-i97hi7hbQjsD3/8OSFhLy7dbKGH8ryjEzOfyhQIn2CFBYOY3ko0vMVEf3IY9nD3Ld7amYzsZ8153RPkcnXA+Lg==", - "requires": { - "@babel/parser": "^7.18.8", - "@babel/traverse": "^7.18.8", - "@docusaurus/logger": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@mdx-js/mdx": "^1.6.22", - "escape-html": "^1.0.3", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "image-size": "^1.0.1", - "mdast-util-to-string": "^2.0.0", - "remark-emoji": "^2.2.0", - 
"stringify-object": "^3.3.0", - "tslib": "^2.4.0", - "unified": "^9.2.2", - "unist-util-visit": "^2.0.3", - "url-loader": "^4.1.1", - "webpack": "^5.73.0" - } - }, - "@docusaurus/module-type-aliases": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-2.1.0.tgz", - "integrity": "sha512-Z8WZaK5cis3xEtyfOT817u9xgGUauT0PuuVo85ysnFRX8n7qLN1lTPCkC+aCmFm/UcV8h/W5T4NtIsst94UntQ==", - "requires": { - "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "2.1.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "@types/react-router-dom": "*", - "react-helmet-async": "*", - "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" - } - }, - "@docusaurus/plugin-content-blog": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.1.0.tgz", - "integrity": "sha512-xEp6jlu92HMNUmyRBEeJ4mCW1s77aAEQO4Keez94cUY/Ap7G/r0Awa6xSLff7HL0Fjg8KK1bEbDy7q9voIavdg==", - "requires": { - "@docusaurus/core": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/types": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-common": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "cheerio": "^1.0.0-rc.12", - "feed": "^4.2.2", - "fs-extra": "^10.1.0", - "lodash": "^4.17.21", - "reading-time": "^1.5.0", - "tslib": "^2.4.0", - "unist-util-visit": "^2.0.3", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" - } - }, - "@docusaurus/plugin-content-docs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.1.0.tgz", - "integrity": "sha512-Rup5pqXrXlKGIC4VgwvioIhGWF7E/NNSlxv+JAxRYpik8VKlWsk9ysrdHIlpX+KJUCO9irnY21kQh2814mlp/Q==", - "requires": { - "@docusaurus/core": "2.1.0", - "@docusaurus/logger": "2.1.0", - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/module-type-aliases": "2.1.0", - "@docusaurus/types": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "@types/react-router-config": "^5.0.6", - "combine-promises": "^1.1.0", - "fs-extra": "^10.1.0", - "import-fresh": "^3.3.0", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "tslib": "^2.4.0", - "utility-types": "^3.10.0", - "webpack": "^5.73.0" - } - }, - "@docusaurus/plugin-content-pages": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.1.0.tgz", - "integrity": "sha512-SwZdDZRlObHNKXTnFo7W2aF6U5ZqNVI55Nw2GCBryL7oKQSLeI0lsrMlMXdzn+fS7OuBTd3MJBO1T4Zpz0i/+g==", - "requires": { - "@docusaurus/core": "2.1.0", - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/types": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@docusaurus/utils-validation": "2.1.0", - "fs-extra": "^10.1.0", - "tslib": "^2.4.0", - "webpack": "^5.73.0" - } - }, - "@docusaurus/theme-common": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.1.0.tgz", - "integrity": "sha512-vT1otpVPbKux90YpZUnvknsn5zvpLf+AW1W0EDcpE9up4cDrPqfsh0QoxGHFJnobE2/qftsBFC19BneN4BH8Ag==", - "requires": { - "@docusaurus/mdx-loader": "2.1.0", - "@docusaurus/module-type-aliases": "2.1.0", - "@docusaurus/plugin-content-blog": "2.1.0", - "@docusaurus/plugin-content-docs": "2.1.0", - "@docusaurus/plugin-content-pages": "2.1.0", - "@docusaurus/utils": "2.1.0", - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router-config": "*", - "clsx": "^1.2.1", - 
"parse-numeric-range": "^1.3.0", - "prism-react-renderer": "^1.3.5", - "tslib": "^2.4.0", - "utility-types": "^3.10.0" - } - }, - "@docusaurus/theme-translations": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.1.0.tgz", - "integrity": "sha512-07n2akf2nqWvtJeMy3A+7oSGMuu5F673AovXVwY0aGAux1afzGCiqIFlYW3EP0CujvDJAEFSQi/Tetfh+95JNg==", - "requires": { - "fs-extra": "^10.1.0", - "tslib": "^2.4.0" - } - }, - "@docusaurus/types": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.1.0.tgz", - "integrity": "sha512-BS1ebpJZnGG6esKqsjtEC9U9qSaPylPwlO7cQ1GaIE7J/kMZI3FITnNn0otXXu7c7ZTqhb6+8dOrG6fZn6fqzQ==", - "requires": { - "@types/history": "^4.7.11", - "@types/react": "*", - "commander": "^5.1.0", - "joi": "^17.6.0", - "react-helmet-async": "^1.3.0", - "utility-types": "^3.10.0", - "webpack": "^5.73.0", - "webpack-merge": "^5.8.0" - } - }, - "@docusaurus/utils": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.1.0.tgz", - "integrity": "sha512-fPvrfmAuC54n8MjZuG4IysaMdmvN5A/qr7iFLbSGSyDrsbP4fnui6KdZZIa/YOLIPLec8vjZ8RIITJqF18mx4A==", - "requires": { - "@docusaurus/logger": "2.1.0", - "@svgr/webpack": "^6.2.1", - "file-loader": "^6.2.0", - "fs-extra": "^10.1.0", - "github-slugger": "^1.4.0", - "globby": "^11.1.0", - "gray-matter": "^4.0.3", - "js-yaml": "^4.1.0", - "lodash": "^4.17.21", - "micromatch": "^4.0.5", - "resolve-pathname": "^3.0.0", - "shelljs": "^0.8.5", - "tslib": "^2.4.0", - "url-loader": "^4.1.1", - "webpack": "^5.73.0" - } - }, - "@docusaurus/utils-common": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.1.0.tgz", - "integrity": "sha512-F2vgmt4yRFgRQR2vyEFGTWeyAdmgKbtmu3sjHObF0tjjx/pN0Iw/c6eCopaH34E6tc9nO0nvp01pwW+/86d1fg==", - "requires": { - "tslib": "^2.4.0" - } - }, - "@docusaurus/utils-validation": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.1.0.tgz", - "integrity": "sha512-AMJzWYKL3b7FLltKtDXNLO9Y649V2BXvrnRdnW2AA+PpBnYV78zKLSCz135cuWwRj1ajNtP4onbXdlnyvCijGQ==", - "requires": { - "@docusaurus/logger": "2.1.0", - "@docusaurus/utils": "2.1.0", - "joi": "^17.6.0", - "js-yaml": "^4.1.0", - "tslib": "^2.4.0" - } - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "requires": { - "object.assign": "^4.1.0" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - 
"color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "copy-webpack-plugin": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", - "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", - "requires": { - "fast-glob": "^3.2.11", - "glob-parent": "^6.0.1", - "globby": "^13.1.1", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0" - }, - "dependencies": { - "globby": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.2.tgz", - "integrity": "sha512-LKSDZXToac40u8Q1PQtZihbNdTYSNMuWe+K5l+oa6KgDzSvVrHXlJy40hUP522RjAIoNLJYBJi7ow+rbFpIhHQ==", - "requires": { - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.11", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^4.0.0" - } - } - } - }, - "css-minimizer-webpack-plugin": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.1.0.tgz", - "integrity": "sha512-Zd+yz4nta4GXi3pMqF6skO8kjzuCUbr62z8SLMGZZtxWxTGTLopOiabPGNDEyjHCRhnhdA1EfHmqLa2Oekjtng==", - "requires": { - "cssnano": "^5.1.8", - "jest-worker": "^27.5.1", - "postcss": "^8.4.13", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0", - "source-map": "^0.6.1" - } - }, - "glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "requires": { - "is-glob": "^4.0.3" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "postcss-loader": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.0.1.tgz", - "integrity": "sha512-VRviFEyYlLjctSM93gAZtcJJ/iSkPZ79zWbN/1fSH+NisBByEiVLqpdVDrPLVSi8DX0oJo12kL/GppTBdKVXiQ==", - "requires": { - "cosmiconfig": "^7.0.0", - "klona": "^2.0.5", - "semver": "^7.3.7" - } - }, - "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", - "requires": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - } - }, - "slash": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", - "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==" - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - }, - "unified": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", - "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", - "requires": { 
- "bail": "^1.0.0", - "extend": "^3.0.0", - "is-buffer": "^2.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - } - } + "@docusaurus/core": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "tslib": "^2.4.0" + } + }, + "@docusaurus/plugin-google-tag-manager": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-2.4.3.tgz", + "integrity": "sha512-1jTzp71yDGuQiX9Bi0pVp3alArV0LSnHXempvQTxwCGAEzUWWaBg4d8pocAlTpbP9aULQQqhgzrs8hgTRPOM0A==", + "requires": { + "@docusaurus/core": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "tslib": "^2.4.0" + } + }, + "@docusaurus/plugin-sitemap": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.4.3.tgz", + "integrity": "sha512-LRQYrK1oH1rNfr4YvWBmRzTL0LN9UAPxBbghgeFRBm5yloF6P+zv1tm2pe2hQTX/QP5bSKdnajCvfnScgKXMZQ==", + "requires": { + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "fs-extra": "^10.1.0", + "sitemap": "^7.1.1", + "tslib": "^2.4.0" + } + }, + "@docusaurus/preset-classic": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-2.4.3.tgz", + "integrity": "sha512-tRyMliepY11Ym6hB1rAFSNGwQDpmszvWYJvlK1E+md4SW8i6ylNHtpZjaYFff9Mdk3i/Pg8ItQq9P0daOJAvQw==", + "requires": { + "@docusaurus/core": "2.4.3", + "@docusaurus/plugin-content-blog": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/plugin-content-pages": "2.4.3", + "@docusaurus/plugin-debug": "2.4.3", + "@docusaurus/plugin-google-analytics": "2.4.3", + "@docusaurus/plugin-google-gtag": "2.4.3", + "@docusaurus/plugin-google-tag-manager": "2.4.3", + "@docusaurus/plugin-sitemap": "2.4.3", + "@docusaurus/theme-classic": "2.4.3", + "@docusaurus/theme-common": "2.4.3", + "@docusaurus/theme-search-algolia": "2.4.3", + "@docusaurus/types": "2.4.3" + } + }, + "@docusaurus/react-loadable": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "requires": { + "@types/react": "*", + "prop-types": "^15.6.2" + } + }, + "@docusaurus/theme-classic": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-2.4.3.tgz", + "integrity": "sha512-QKRAJPSGPfDY2yCiPMIVyr+MqwZCIV2lxNzqbyUW0YkrlmdzzP3WuQJPMGLCjWgQp/5c9kpWMvMxjhpZx1R32Q==", + "requires": { + "@docusaurus/core": "2.4.3", + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/module-type-aliases": "2.4.3", + "@docusaurus/plugin-content-blog": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/plugin-content-pages": "2.4.3", + "@docusaurus/theme-common": "2.4.3", + "@docusaurus/theme-translations": "2.4.3", + "@docusaurus/types": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "@mdx-js/react": "^1.6.22", + "clsx": "^1.2.1", + "copy-text-to-clipboard": "^3.0.1", + "infima": "0.2.0-alpha.43", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.4.14", + "prism-react-renderer": "^1.3.5", + "prismjs": "^1.28.0", + "react-router-dom": "^5.3.3", + "rtlcss": 
"^3.5.0", + "tslib": "^2.4.0", + "utility-types": "^3.10.0" + } + }, + "@docusaurus/theme-common": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-2.4.3.tgz", + "integrity": "sha512-7KaDJBXKBVGXw5WOVt84FtN8czGWhM0lbyWEZXGp8AFfL6sZQfRTluFp4QriR97qwzSyOfQb+nzcDZZU4tezUw==", + "requires": { + "@docusaurus/mdx-loader": "2.4.3", + "@docusaurus/module-type-aliases": "2.4.3", + "@docusaurus/plugin-content-blog": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/plugin-content-pages": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-common": "2.4.3", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^1.2.1", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^1.3.5", + "tslib": "^2.4.0", + "use-sync-external-store": "^1.2.0", + "utility-types": "^3.10.0" + } + }, + "@docusaurus/theme-search-algolia": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.3.tgz", + "integrity": "sha512-jziq4f6YVUB5hZOB85ELATwnxBz/RmSLD3ksGQOLDPKVzat4pmI8tddNWtriPpxR04BNT+ZfpPUMFkNFetSW1Q==", + "requires": { + "@docsearch/react": "^3.1.1", + "@docusaurus/core": "2.4.3", + "@docusaurus/logger": "2.4.3", + "@docusaurus/plugin-content-docs": "2.4.3", + "@docusaurus/theme-common": "2.4.3", + "@docusaurus/theme-translations": "2.4.3", + "@docusaurus/utils": "2.4.3", + "@docusaurus/utils-validation": "2.4.3", + "algoliasearch": "^4.13.1", + "algoliasearch-helper": "^3.10.0", + "clsx": "^1.2.1", + "eta": "^2.0.0", + "fs-extra": "^10.1.0", + "lodash": "^4.17.21", + "tslib": "^2.4.0", + "utility-types": "^3.10.0" } }, "@docusaurus/theme-translations": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.0.0-beta.17.tgz", - "integrity": "sha512-oxCX6khjZH3lgdRCL0DH06KkUM/kDr9+lzB35+vY8rpFeQruVgRdi8ekPqG3+Wr0U/N+LMhcYE5BmCb6D0Fv2A==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-2.4.3.tgz", + "integrity": "sha512-H4D+lbZbjbKNS/Zw1Lel64PioUAIT3cLYYJLUf3KkuO/oc9e0QCVhIYVtUI2SfBCF2NNdlyhBDQEEMygsCedIg==", "requires": { - "fs-extra": "^10.0.1", - "tslib": "^2.3.1" + "fs-extra": "^10.1.0", + "tslib": "^2.4.0" } }, "@docusaurus/types": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.0.0-beta.17.tgz", - "integrity": "sha512-4o7TXu5sKlQpybfFFtsGUElBXwSpiXKsQyyWaRKj7DRBkvMtkDX6ITZNnZO9+EHfLbP/cfrokB8C/oO7mCQ5BQ==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.3.tgz", + "integrity": "sha512-W6zNLGQqfrp/EoPD0bhb9n7OobP+RHpmvVzpA+Z/IuU3Q63njJM24hmT0GYboovWcDtFmnIJC9wcyx4RVPQscw==", "requires": { + "@types/history": "^4.7.11", + "@types/react": "*", "commander": "^5.1.0", "joi": "^17.6.0", - "querystring": "0.2.1", + "react-helmet-async": "^1.3.0", "utility-types": "^3.10.0", - "webpack": "^5.69.1", + "webpack": "^5.73.0", "webpack-merge": "^5.8.0" } }, "@docusaurus/utils": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.0.0-beta.17.tgz", - "integrity": "sha512-yRKGdzSc5v6M/6GyQ4omkrAHCleevwKYiIrufCJgRbOtkhYE574d8mIjjirOuA/emcyLxjh+TLtqAA5TwhIryA==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.3.tgz", + "integrity": 
"sha512-fKcXsjrD86Smxv8Pt0TBFqYieZZCPh4cbf9oszUq/AMhZn3ujwpKaVYZACPX8mmjtYx0JOgNx52CREBfiGQB4A==", "requires": { - "@docusaurus/logger": "2.0.0-beta.17", - "@svgr/webpack": "^6.0.0", + "@docusaurus/logger": "2.4.3", + "@svgr/webpack": "^6.2.1", + "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", - "fs-extra": "^10.0.1", + "fs-extra": "^10.1.0", "github-slugger": "^1.4.0", - "globby": "^11.0.4", + "globby": "^11.1.0", "gray-matter": "^4.0.3", "js-yaml": "^4.1.0", "lodash": "^4.17.21", - "micromatch": "^4.0.4", + "micromatch": "^4.0.5", "resolve-pathname": "^3.0.0", "shelljs": "^0.8.5", - "tslib": "^2.3.1", + "tslib": "^2.4.0", "url-loader": "^4.1.1", - "webpack": "^5.69.1" + "webpack": "^5.73.0" + }, + "dependencies": { + "escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==" + } } }, "@docusaurus/utils-common": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.0.0-beta.17.tgz", - "integrity": "sha512-90WCVdj6zYzs7neEIS594qfLO78cUL6EVK1CsRHJgVkkGjcYlCQ1NwkyO7bOb+nIAwdJrPJRc2FBSpuEGxPD3w==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.3.tgz", + "integrity": "sha512-/jascp4GbLQCPVmcGkPzEQjNaAk3ADVfMtudk49Ggb+131B1WDD6HqlSmDf8MxGdy7Dja2gc+StHf01kiWoTDQ==", "requires": { - "tslib": "^2.3.1" + "tslib": "^2.4.0" } }, "@docusaurus/utils-validation": { - "version": "2.0.0-beta.17", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.0.0-beta.17.tgz", - "integrity": "sha512-5UjayUP16fDjgd52eSEhL7SlN9x60pIhyS+K7kt7RmpSLy42+4/bSr2pns2VlATmuaoNOO6iIFdB2jgSYJ6SGA==", + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.3.tgz", + "integrity": "sha512-G2+Vt3WR5E/9drAobP+hhZQMaswRwDlp6qOMi7o7ZypB+VO7N//DZWhZEwhcRGepMDJGQEwtPv7UxtYwPL9PBw==", "requires": { - "@docusaurus/logger": "2.0.0-beta.17", - "@docusaurus/utils": "2.0.0-beta.17", + "@docusaurus/logger": "2.4.3", + "@docusaurus/utils": "2.4.3", "joi": "^17.6.0", - "tslib": "^2.3.1" + "js-yaml": "^4.1.0", + "tslib": "^2.4.0" + } + }, + "@emnapi/core": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.2.0.tgz", + "integrity": "sha512-E7Vgw78I93we4ZWdYCb4DGAwRROGkMIXk7/y87UmANR+J6qsWusmC3gLt0H+O0KOt5e6O38U8oJamgbudrES/w==", + "optional": true, + "requires": { + "@emnapi/wasi-threads": "1.0.1", + "tslib": "^2.4.0" + } + }, + "@emnapi/runtime": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.2.0.tgz", + "integrity": "sha512-bV21/9LQmcQeCPEg3BDFtvwL6cwiTMksYNWQQ4KOxCZikEGalWtenoZ0wCiukJINlGCIi2KXx01g4FoH/LxpzQ==", + "optional": true, + "requires": { + "tslib": "^2.4.0" + } + }, + "@emnapi/wasi-threads": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.0.1.tgz", + "integrity": "sha512-iIBu7mwkq4UQGeMEM8bLwNK962nXdhodeScX4slfQnRhEMMzvYivHhutCIk8uojvmASXXPC2WNEjwxFWk72Oqw==", + "optional": true, + "requires": { + "tslib": "^2.4.0" } }, "@hapi/hoek": { @@ -17460,6 +17543,72 @@ "@hapi/hoek": "^9.0.0" } }, + "@jest/schemas": { + "version": "29.6.0", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.0.tgz", + "integrity": 
"sha512-rxLjXyJBTL4LQeJW3aKo0M/+GkCOXsO+8i9Iu7eDb6KwtP65ayoDsitrdPBtujxQ88k4wI2FNYfa6TOGwSn6cQ==", + "requires": { + "@sinclair/typebox": "^0.27.8" + } + }, + "@jest/types": { + "version": "29.6.1", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.1.tgz", + "integrity": "sha512-tPKQNMPuXgvdOn2/Lg9HNfUvjYVGolt04Hp03f5hAk878uwOLikN+JzeLY0HcVgKgFl9Hs3EIqpu3WX27XNhnw==", + "requires": { + "@jest/schemas": "^29.6.0", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, "@jridgewell/gen-mapping": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz", @@ -17506,12 +17655,12 @@ "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==" }, "@jridgewell/trace-mapping": { - "version": "0.3.15", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz", - "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==", + "version": "0.3.20", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz", + "integrity": "sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==", "requires": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, "@leichtgewicht/ip-codec": { @@ -17593,14 +17742,27 @@ } }, "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + "version": "5.7.2", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==" }, "source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==" + }, + "unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + } } } }, @@ -17615,6 +17777,125 @@ "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz", "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==" }, + "@napi-rs/wasm-runtime": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.4.tgz", + "integrity": "sha512-9zESzOO5aDByvhIAsOy9TbpZ0Ur2AJbUI7UT73kcUTS2mxAMHOBaa1st/jAymNoCtvrit99kkzT1FZuXVcgfIQ==", + "optional": true, + "requires": { + "@emnapi/core": "^1.1.0", + "@emnapi/runtime": "^1.1.0", + "@tybys/wasm-util": "^0.9.0" + } + }, + "@node-rs/jieba": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba/-/jieba-1.10.3.tgz", + "integrity": "sha512-SG0CWHmhIveH6upJURgymDKLertEPYbOc5NSFIpbZWW1W2MpqgumVteQO+5YBlkmpR6jMNDPWNQyQwkB6HoeNg==", + "requires": { + "@node-rs/jieba-android-arm-eabi": "1.10.3", + "@node-rs/jieba-android-arm64": "1.10.3", + "@node-rs/jieba-darwin-arm64": "1.10.3", + "@node-rs/jieba-darwin-x64": "1.10.3", + "@node-rs/jieba-freebsd-x64": "1.10.3", + "@node-rs/jieba-linux-arm-gnueabihf": "1.10.3", + "@node-rs/jieba-linux-arm64-gnu": "1.10.3", + "@node-rs/jieba-linux-arm64-musl": "1.10.3", + "@node-rs/jieba-linux-x64-gnu": "1.10.3", + "@node-rs/jieba-linux-x64-musl": "1.10.3", + "@node-rs/jieba-wasm32-wasi": "1.10.3", + "@node-rs/jieba-win32-arm64-msvc": "1.10.3", + "@node-rs/jieba-win32-ia32-msvc": "1.10.3", + "@node-rs/jieba-win32-x64-msvc": "1.10.3" + } + }, + "@node-rs/jieba-android-arm-eabi": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-android-arm-eabi/-/jieba-android-arm-eabi-1.10.3.tgz", + "integrity": "sha512-fuqVtaYlUKZg3cqagYFxj1DSa7ZHKXLle4iGH2kbQWg7Kw6cf7aCYBHIUZuH5sliK10M/CWccZ+SGRUwcSGfbg==", + "optional": true + }, + "@node-rs/jieba-android-arm64": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-android-arm64/-/jieba-android-arm64-1.10.3.tgz", + "integrity": "sha512-iuZZZq5yD9lT+AgaXpFe19gtAsIecUODRLLaBFbavjgjLk5cumv38ytWjS36s/eqptwI15MQfysSYOlWtMEG5g==", + "optional": true + }, + "@node-rs/jieba-darwin-arm64": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-darwin-arm64/-/jieba-darwin-arm64-1.10.3.tgz", + "integrity": "sha512-dwPhkav1tEARskwPz91UUXL2NXy4h0lJYTuJzpGgwXxm552zBM2JJ41kjah1364j+EOq5At3NQvf5r5rH89phQ==", + "optional": true + }, + "@node-rs/jieba-darwin-x64": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-darwin-x64/-/jieba-darwin-x64-1.10.3.tgz", + "integrity": "sha512-kjxvV6G1baQo/2I3mELv5qGv4Q0rhd5srwXhypSxMWZFtSpNwCDsLcIOR5bvMBci6QVFfZOs6WD6DKiWVz0SlA==", + "optional": true + }, + "@node-rs/jieba-freebsd-x64": { + "version": "1.10.3", + 
"resolved": "https://registry.npmjs.org/@node-rs/jieba-freebsd-x64/-/jieba-freebsd-x64-1.10.3.tgz", + "integrity": "sha512-QYTsn+zlWRil+MuBeLfTK5Md4GluOf2lHnFqjrOZW2oMgNOvxB3qoLV4TUf70S/E2XHeP6PUdjCKItX8C7GQPg==", + "optional": true + }, + "@node-rs/jieba-linux-arm-gnueabihf": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm-gnueabihf/-/jieba-linux-arm-gnueabihf-1.10.3.tgz", + "integrity": "sha512-UFB43kDOvqmbRl99e3GPwaTuwJZaAvgLaMTvBkmxww4MpQH6G1k31RLzMW/S21uSQso2lj6W/Mm59gaJk2FiyA==", + "optional": true + }, + "@node-rs/jieba-linux-arm64-gnu": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm64-gnu/-/jieba-linux-arm64-gnu-1.10.3.tgz", + "integrity": "sha512-bu++yWi10wZtnS5uLcwxzxKmHVT77NgQMK8JiQr1TWCl3Y1Th7CnEHQtxfVB489edDK8l644h1/4zSTe5fRnOQ==", + "optional": true + }, + "@node-rs/jieba-linux-arm64-musl": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-arm64-musl/-/jieba-linux-arm64-musl-1.10.3.tgz", + "integrity": "sha512-pJh+SzrK1HaKakhdFM+ew9vXwpZqMxy9u0U7J4GT+3GvOwnAZ+KjeaHebIfgOz7ZHvp/T4YBNf8oWW4zwj3AJw==", + "optional": true + }, + "@node-rs/jieba-linux-x64-gnu": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-x64-gnu/-/jieba-linux-x64-gnu-1.10.3.tgz", + "integrity": "sha512-GF5cfvu/0wXO2fVX/XV3WYH/xEGWzMBvfqLhGiA1OA1xHIufnA1T7uU3ZXkyoNi5Bzf6dmxnwtE4CJL0nvhwjQ==", + "optional": true + }, + "@node-rs/jieba-linux-x64-musl": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-linux-x64-musl/-/jieba-linux-x64-musl-1.10.3.tgz", + "integrity": "sha512-h45HMVU/hgzQ0saXNsK9fKlGdah1i1cXZULpB5vQRlRL2ZIaGp+ULtWTogS7vkoo2K8s2l4tqakWMg9eUjIJ2A==", + "optional": true + }, + "@node-rs/jieba-wasm32-wasi": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-wasm32-wasi/-/jieba-wasm32-wasi-1.10.3.tgz", + "integrity": "sha512-vuoQ62vVoedNGcBmIi4UWdtNBOZG8B+vDYfjx3FD6rNg6g/RgwbVjYXbOVMOQwX06Ob9CfrutICXdUGHgoxzEQ==", + "optional": true, + "requires": { + "@napi-rs/wasm-runtime": "^0.2.3" + } + }, + "@node-rs/jieba-win32-arm64-msvc": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-arm64-msvc/-/jieba-win32-arm64-msvc-1.10.3.tgz", + "integrity": "sha512-B8t4dh56TZnMLBoYWDkopf1ed37Ru/iU1qiIeBkbZWXGmNBChNZUOd//eaPOFjx8m9Sfc8bkj3FBRWt/kTAhmw==", + "optional": true + }, + "@node-rs/jieba-win32-ia32-msvc": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-ia32-msvc/-/jieba-win32-ia32-msvc-1.10.3.tgz", + "integrity": "sha512-SKuPGZJ5T+X4jOn1S8LklOSZ6HC7UBiw0hwi2z9uqX6WgElquLjGi/xfZ2gPqffeR/5K/PUu7aqYUUPL1XonVQ==", + "optional": true + }, + "@node-rs/jieba-win32-x64-msvc": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-win32-x64-msvc/-/jieba-win32-x64-msvc-1.10.3.tgz", + "integrity": "sha512-j9I4+a/tf2hsLu8Sr0NhcLBVNBBQctO2mzcjemMpRa1SlEeODyic9RIyP8Ljz3YTN6MYqKh1KA9iR1xvxjxYFg==", + "optional": true + }, "@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -17652,15 +17933,20 @@ } }, "@sideway/formula": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.0.tgz", - "integrity": "sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg==" + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" }, "@sideway/pinpoint": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" }, + "@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, "@sindresorhus/is": { "version": "0.14.0", "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", @@ -17835,6 +18121,15 @@ "dev": true, "optional": true }, + "@tybys/wasm-util": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.9.0.tgz", + "integrity": "sha512-6+7nlbMVX/PVDCwaIQ8nTOPveOcFLSt8GcXdx8hD0bt39uWxYT88uXzqTd4fTvqta7oeUJqudepapKNt2DYJFw==", + "optional": true, + "requires": { + "tslib": "^2.4.0" + } + }, "@types/body-parser": { "version": "1.19.2", "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", @@ -17914,11 +18209,11 @@ } }, "@types/hast": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.4.tgz", - "integrity": "sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g==", + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", + "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", "requires": { - "@types/unist": "*" + "@types/unist": "^2" } }, "@types/history": { @@ -17939,17 +18234,38 @@ "@types/node": "*" } }, + "@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==" + }, + "@types/istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "requires": { + "@types/istanbul-lib-coverage": "*" + } + }, + "@types/istanbul-reports": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", + "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "requires": { + "@types/istanbul-lib-report": "*" + } + }, "@types/json-schema": { "version": "7.0.11", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==" }, "@types/mdast": { - "version": "3.0.10", - "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.10.tgz", - "integrity": "sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA==", + "version": "3.0.15", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.15.tgz", + "integrity": "sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==", "requires": { - "@types/unist": 
"*" + "@types/unist": "^2" } }, "@types/mime": { @@ -17998,22 +18314,22 @@ } }, "@types/react-router": { - "version": "5.1.19", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.19.tgz", - "integrity": "sha512-Fv/5kb2STAEMT3wHzdKQK2z8xKq38EDIGVrutYLmQVVLe+4orDFquU52hQrULnEHinMKv9FSA6lf9+uNT1ITtA==", + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", "requires": { "@types/history": "^4.7.11", "@types/react": "*" } }, "@types/react-router-config": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.6.tgz", - "integrity": "sha512-db1mx37a1EJDf1XeX8jJN7R3PZABmJQXR8r28yUjVMFSjkmnQo6X6pOEEmNl+Tp2gYQOGPdYbFIipBtdElZ3Yg==", + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", "requires": { "@types/history": "^4.7.11", "@types/react": "*", - "@types/react-router": "*" + "@types/react-router": "^5.1.0" } }, "@types/react-router-dom": { @@ -18032,9 +18348,9 @@ "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" }, "@types/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-pSAff4IAxJjfAXUG6tFkO7dsSbTmf8CtUpfhhZ5VhkRpC4628tJhh3+V6H1E+/Gs9piSzYKT5yzHO5M4GG9jkw==", + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", "requires": { "@types/node": "*" } @@ -18082,6 +18398,19 @@ "@types/node": "*" } }, + "@types/yargs": { + "version": "17.0.24", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", + "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "requires": { + "@types/yargs-parser": "*" + } + }, + "@types/yargs-parser": { + "version": "21.0.0", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", + "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==" + }, "@webassemblyjs/ast": { "version": "1.11.1", "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.1.tgz", @@ -18460,12 +18789,12 @@ "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==" }, "autoprefixer": { - "version": "10.4.10", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.10.tgz", - "integrity": "sha512-nMaiDARyp1e74c8IeAXkr+BmFKa8By4Zak7tyaNPF09Iu39WFpNXOWrVirmXjKr+5cOyERwvtbMOLYz6iBJYgQ==", + "version": "10.4.14", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.14.tgz", + "integrity": "sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==", "requires": { - "browserslist": "^4.21.3", - "caniuse-lite": "^1.0.30001399", + "browserslist": "^4.21.5", + "caniuse-lite": "^1.0.30001464", "fraction.js": "^4.2.0", "normalize-range": "^0.1.2", "picocolors": "^1.0.0", @@ -18508,9 +18837,9 @@ } }, "babel-plugin-dynamic-import-node": { - "version": "2.3.0", - "resolved": 
"https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz", - "integrity": "sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", "requires": { "object.assign": "^4.1.0" } @@ -18541,9 +18870,9 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, @@ -18612,20 +18941,20 @@ } }, "body-parser": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.0.tgz", - "integrity": "sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "requires": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.10.3", - "raw-body": "2.5.1", + "qs": "6.11.0", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -18736,22 +19065,22 @@ } }, "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "requires": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" } }, "browserslist": { - "version": "4.21.3", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.3.tgz", - "integrity": "sha512-898rgRXLAyRkM1GryrrBHGkqA5hlpkV5MhtZwg9QXeiyLUYs2k00Un05aX5l2/yJIOObYKOpS2JNo8nJDE7fWQ==", + "version": "4.21.9", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.9.tgz", + "integrity": "sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==", "requires": { - "caniuse-lite": "^1.0.30001370", - "electron-to-chromium": "^1.4.202", - "node-releases": "^2.0.6", - "update-browserslist-db": "^1.0.5" + "caniuse-lite": "^1.0.30001503", + "electron-to-chromium": "^1.4.431", + "node-releases": "^2.0.12", + "update-browserslist-db": "^1.0.11" } }, "buffer": { @@ -18815,12 +19144,15 @@ "dev": true }, "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": 
"sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" } }, "callsites": { @@ -18859,9 +19191,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001399", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001399.tgz", - "integrity": "sha512-4vQ90tMKS+FkvuVWS5/QY1+d805ODxZiKFzsU8o/RsVJz49ZSRR8EjykLJbqhzdPgadbX6wB538wOzle3JniRA==" + "version": "1.0.30001628", + "resolved": "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001628.tgz", + "integrity": "sha512-S3BnR4Kh26TBxbi5t5kpbcUlLJb9lhtDXISDPwOfI+JoC+ik0QksvkZtUVyikw3hjnkgkMPSJ8oIM9yMm9vflA==" }, "ccount": { "version": "1.1.0", @@ -18984,9 +19316,9 @@ } }, "cli-spinners": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.7.0.tgz", - "integrity": "sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw==", + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.0.tgz", + "integrity": "sha512-4/aL9X3Wh0yiMQlE+eeRhWP6vclO3QRtw1JHKIT0FFUs5FjpFmESqtMvYZ0+lbzBw900b95mS0hohy+qn2VK/g==", "dev": true }, "cli-table3": { @@ -19099,9 +19431,9 @@ "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==" }, "commitizen": { - "version": "4.2.5", - "resolved": "https://registry.npmjs.org/commitizen/-/commitizen-4.2.5.tgz", - "integrity": "sha512-9sXju8Qrz1B4Tw7kC5KhnvwYQN88qs2zbiB8oyMsnXZyJ24PPGiNM3nHr73d32dnE3i8VJEXddBFIbOgYSEXtQ==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/commitizen/-/commitizen-4.3.0.tgz", + "integrity": "sha512-H0iNtClNEhT0fotHvGV3E9tDejDeS04sN1veIebsKYGMuGscFaswRoYJKmT3eW85eIJAs0F28bG2+a/9wCOfPw==", "dev": true, "requires": { "cachedir": "2.3.0", @@ -19112,10 +19444,10 @@ "find-root": "1.1.0", "fs-extra": "9.1.0", "glob": "7.2.3", - "inquirer": "8.2.4", + "inquirer": "8.2.5", "is-utf8": "^0.2.1", "lodash": "4.17.21", - "minimist": "1.2.6", + "minimist": "1.2.7", "strip-bom": "4.0.0", "strip-json-comments": "3.1.1" }, @@ -19222,9 +19554,9 @@ "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==" }, "content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==" }, "conventional-commit-types": { "version": "3.0.0", @@ -19241,9 +19573,9 @@ } }, "cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==" + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==" }, "cookie-signature": { "version": "1.0.6", @@ -19251,28 +19583,23 @@ "integrity": 
"sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" }, "copy-text-to-clipboard": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.0.1.tgz", - "integrity": "sha512-rvVsHrpFcL4F2P8ihsoLdFHmd404+CMg71S756oRSeQgqk51U3kicGdnvfkrxva0xXH92SjGS62B0XIJsbh+9Q==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==" }, "copy-webpack-plugin": { - "version": "10.2.4", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-10.2.4.tgz", - "integrity": "sha512-xFVltahqlsRcyyJqQbDY6EYTtyQZF9rf+JPjwHObLdPFMEISqkFkr7mFoVOC6BfYS/dNThyoQKvziugm+OnwBg==", + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", "requires": { - "fast-glob": "^3.2.7", + "fast-glob": "^3.2.11", "glob-parent": "^6.0.1", - "globby": "^12.0.2", + "globby": "^13.1.1", "normalize-path": "^3.0.0", "schema-utils": "^4.0.0", "serialize-javascript": "^6.0.0" }, "dependencies": { - "array-union": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-3.0.1.tgz", - "integrity": "sha512-1OvF9IbWwaeiM9VhzYXVQacMibxpXOMYVNIvMtKRyX9SImBXpKcFr8XvFDeEslCyuH/t6KRt7HEO94AlP8Iatw==" - }, "glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -19282,27 +19609,26 @@ } }, "globby": { - "version": "12.2.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-12.2.0.tgz", - "integrity": "sha512-wiSuFQLZ+urS9x2gGPl1H5drc5twabmm4m2gTR27XDFyjUHJUNsS8o/2aKyIF6IoBaR630atdher0XJ5g6OMmA==", + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", "requires": { - "array-union": "^3.0.1", "dir-glob": "^3.0.1", - "fast-glob": "^3.2.7", - "ignore": "^5.1.9", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", "merge2": "^1.4.1", "slash": "^4.0.0" } }, "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", "requires": { "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", + "ajv": "^8.9.0", "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" + "ajv-keywords": "^5.1.0" } }, "slash": { @@ -19363,11 +19689,11 @@ "optional": true }, "cross-fetch": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", - "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", "requires": { - "node-fetch": "2.6.7" + "node-fetch": "^2.6.12" } }, 
"cross-spawn": { @@ -19407,27 +19733,51 @@ } }, "css-minimizer-webpack-plugin": { - "version": "3.4.1", - "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-3.4.1.tgz", - "integrity": "sha512-1u6D71zeIfgngN2XNRJefc/hY7Ybsxd74Jm4qngIXyUEk7fss3VUzuHxLAq/R8NAba4QU9OUSaMZlbpRc7bM4Q==", + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz", + "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==", "requires": { - "cssnano": "^5.0.6", - "jest-worker": "^27.0.2", - "postcss": "^8.3.5", + "cssnano": "^5.1.8", + "jest-worker": "^29.1.2", + "postcss": "^8.4.17", "schema-utils": "^4.0.0", "serialize-javascript": "^6.0.0", "source-map": "^0.6.1" }, "dependencies": { + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "jest-worker": { + "version": "29.6.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.6.1.tgz", + "integrity": "sha512-U+Wrbca7S8ZAxAe9L6nb6g8kPdia5hj32Puu5iOqBCMTMWFHXuK6dOV2IFrpedbTV8fjMFLdWNttQTBL6u2MRA==", + "requires": { + "@types/node": "*", + "jest-util": "^29.6.1", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + } + }, "schema-utils": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.0.0.tgz", - "integrity": "sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", "requires": { "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", + "ajv": "^8.9.0", "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" + "ajv-keywords": "^5.1.0" + } + }, + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "requires": { + "has-flag": "^4.0.0" } } } @@ -19474,12 +19824,12 @@ } }, "cssnano-preset-advanced": { - "version": "5.3.8", - "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.8.tgz", - "integrity": "sha512-xUlLLnEB1LjpEik+zgRNlk8Y/koBPPtONZjp7JKbXigeAmCrFvq9H0pXW5jJV45bQWAlmJ0sKy+IMr0XxLYQZg==", + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", + "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", "requires": { - "autoprefixer": "^10.3.7", - "cssnano-preset-default": "^5.2.12", + "autoprefixer": "^10.4.12", + "cssnano-preset-default": "^5.2.14", "postcss-discard-unused": "^5.1.0", "postcss-merge-idents": "^5.1.1", "postcss-reduce-idents": "^5.2.0", @@ -19487,24 +19837,24 @@ } }, "cssnano-preset-default": { - "version": "5.2.12", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.12.tgz", - "integrity": "sha512-OyCBTZi+PXgylz9HAA5kHyoYhfGcYdwFmyaJzWnzxuGRtnMw/kR6ilW9XzlzlRAtB6PLT/r+prYgkef7hngFew==", + "version": "5.2.14", + "resolved": 
"https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", + "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", "requires": { - "css-declaration-sorter": "^6.3.0", + "css-declaration-sorter": "^6.3.1", "cssnano-utils": "^3.1.0", "postcss-calc": "^8.2.3", - "postcss-colormin": "^5.3.0", - "postcss-convert-values": "^5.1.2", + "postcss-colormin": "^5.3.1", + "postcss-convert-values": "^5.1.3", "postcss-discard-comments": "^5.1.2", "postcss-discard-duplicates": "^5.1.0", "postcss-discard-empty": "^5.1.1", "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.6", - "postcss-merge-rules": "^5.1.2", + "postcss-merge-longhand": "^5.1.7", + "postcss-merge-rules": "^5.1.4", "postcss-minify-font-values": "^5.1.0", "postcss-minify-gradients": "^5.1.1", - "postcss-minify-params": "^5.1.3", + "postcss-minify-params": "^5.1.4", "postcss-minify-selectors": "^5.2.1", "postcss-normalize-charset": "^5.1.0", "postcss-normalize-display-values": "^5.1.0", @@ -19512,11 +19862,11 @@ "postcss-normalize-repeat-style": "^5.1.1", "postcss-normalize-string": "^5.1.0", "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.0", + "postcss-normalize-unicode": "^5.1.1", "postcss-normalize-url": "^5.1.0", "postcss-normalize-whitespace": "^5.1.1", "postcss-ordered-values": "^5.1.3", - "postcss-reduce-initial": "^5.1.0", + "postcss-reduce-initial": "^5.1.2", "postcss-reduce-transforms": "^5.1.0", "postcss-svgo": "^5.1.0", "postcss-unique-selectors": "^5.1.1" @@ -19597,9 +19947,9 @@ } }, "defaults": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", - "integrity": "sha512-s82itHOnYrN0Ib8r+z7laQz3sdE+4FP3d9Q7VLO7U+KRT+CR0GsWuyHxzdAY82I7cXv0G/twrqomTJLOssO5HA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", "dev": true, "requires": { "clone": "^1.0.2" @@ -19610,6 +19960,16 @@ "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" }, + "define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, "define-lazy-prop": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", @@ -19846,9 +20206,9 @@ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" }, "electron-to-chromium": { - "version": "1.4.249", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.249.tgz", - "integrity": "sha512-GMCxR3p2HQvIw47A599crTKYZprqihoBL4lDSAUmr7IYekXFK5t/WgEBrGJDCa2HWIZFQEkGuMqPCi05ceYqPQ==" + "version": "1.4.459", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.459.tgz", + "integrity": "sha512-XXRS5NFv8nCrBL74Rm3qhJjA2VCsRFx0OjHKBMPI0otij56aun8UWiKTDABmd5/7GTR021pA4wivs+Ri6XCElg==" }, "emoji-regex": { "version": "9.2.2", @@ -19900,6 +20260,19 @@ "is-arrayish": "^0.2.1" } }, + 
"es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "requires": { + "get-intrinsic": "^1.2.4" + } + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==" + }, "es-module-lexer": { "version": "0.9.3", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-0.9.3.tgz", @@ -19965,9 +20338,9 @@ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" }, "eta": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/eta/-/eta-1.12.3.tgz", - "integrity": "sha512-qHixwbDLtekO/d51Yr4glcaUJCIjGVJyTzuqV4GPlgZo1YpgOKG+avQynErZIYrfM6JIJdtiG2Kox8tbb+DoGg==" + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==" }, "etag": { "version": "1.8.1", @@ -20026,16 +20399,16 @@ } }, "express": { - "version": "4.18.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.1.tgz", - "integrity": "sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "requires": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.0", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -20051,7 +20424,7 @@ "parseurl": "~1.3.3", "path-to-regexp": "0.1.7", "proxy-addr": "~2.0.7", - "qs": "6.10.3", + "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", "send": "0.18.0", @@ -20136,9 +20509,9 @@ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.0.tgz", + "integrity": "sha512-ChDuvbOypPuNjO8yIDf36x7BlZX1smcUMTTcyoIjycexOxd6DFsKsg21qVBzEmr3G7fUKIRy2/psii+CIUt7FA==", "requires": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -20185,9 +20558,9 @@ } }, "fbjs": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.4.tgz", - "integrity": "sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ==", + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/fbjs/-/fbjs-3.0.5.tgz", + "integrity": "sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg==", "requires": { "cross-fetch": "^3.1.5", "fbjs-css-vars": "^1.0.0", @@ -20195,7 +20568,7 @@ "object-assign": "^4.1.0", "promise": "^7.1.1", "setimmediate": "^1.0.5", - "ua-parser-js": "^0.7.30" + "ua-parser-js": "^1.0.35" } }, "fbjs-css-vars": { @@ -20269,9 +20642,9 @@ 
"integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==" }, "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "requires": { "to-regex-range": "^5.0.1" } @@ -20352,19 +20725,24 @@ "resolve-dir": "^1.0.1" } }, + "flat-color-icons": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/flat-color-icons/-/flat-color-icons-1.1.0.tgz", + "integrity": "sha512-duN0PycToLxktav7Asyw0hYCSxH8dvoCbeZHw0zSTw+KXg4rAH4CcLViDrGjg//nLfoZjiDV34kw8xmJJfSfqA==" + }, "flux": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/flux/-/flux-4.0.3.tgz", - "integrity": "sha512-yKAbrp7JhZhj6uiT1FTuVMlIAT1J4jqEyBpFApi1kxpGZCvacMVc/t1pMQyotqHhAgvoE3bNvAykhCo2CLjnYw==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/flux/-/flux-4.0.4.tgz", + "integrity": "sha512-NCj3XlayA2UsapRpM7va6wU1+9rE5FIL7qoMcmxWHRzbp0yujihMBm9BBHZ1MDIk5h5o2Bl6eGiCe8rYELAmYw==", "requires": { "fbemitter": "^3.0.0", "fbjs": "^3.0.1" } }, "follow-redirects": { - "version": "1.15.2", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", - "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==" + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==" }, "fork-ts-checker-webpack-plugin": { "version": "6.5.2", @@ -20541,9 +20919,9 @@ "optional": true }, "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==" }, "gauge": { "version": "3.0.2", @@ -20584,13 +20962,15 @@ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==" }, "get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" } }, "get-own-enumerable-property-symbols": { @@ -20606,10 +20986,15 @@ "pump": "^3.0.0" } }, + "github-buttons": { + "version": "2.28.0", + "resolved": "https://registry.npmmirror.com/github-buttons/-/github-buttons-2.28.0.tgz", + "integrity": 
"sha512-KsCbYiA+MiHO3ytzdGvGt/GNde4GfG9BrrLxxc+ut2snBF9IAjrn2F5mNgHHEXdG/CfFIHOMV8Uxy4LNhxZwUA==" + }, "github-slugger": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.4.0.tgz", - "integrity": "sha512-w0dzqw/nt51xMVmlaV1+JRzN+oCa1KfcgGEWhxUG16wbdA+Xnt/yoFO8Z8x/V82ZcZ0wy6ln9QDup5avbhiDhQ==" + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" }, "glob": { "version": "7.2.3", @@ -20693,6 +21078,14 @@ "slash": "^3.0.0" } }, + "gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "requires": { + "get-intrinsic": "^1.1.3" + } + }, "got": { "version": "9.6.0", "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", @@ -20773,13 +21166,18 @@ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" }, "has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "requires": { - "get-intrinsic": "^1.1.1" + "es-define-property": "^1.0.0" } }, + "has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==" + }, "has-symbols": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", @@ -20795,6 +21193,14 @@ "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==" }, + "hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "requires": { + "function-bind": "^1.1.2" + } + }, "hast-to-hyperscript": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz", @@ -20955,6 +21361,11 @@ } } }, + "htm": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/htm/-/htm-3.1.1.tgz", + "integrity": "sha512-983Vyg8NwUE7JkZ6NmOqpCZ+sh1bKv2iYTlUkzlWmA5JD2acKoxd4KVxbMmxX/85mtfdnDmTFoNKcg5DGAvxNQ==" + }, "html-entities": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.3.tgz", @@ -21015,9 +21426,9 @@ } }, "http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": 
"sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" }, "http-deceiver": { "version": "1.2.7", @@ -21105,14 +21516,14 @@ "dev": true }, "ignore": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", - "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==" + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==" }, "image-size": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", - "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", + "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", "requires": { "queue": "6.0.2" } @@ -21154,9 +21565,9 @@ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" }, "infima": { - "version": "0.2.0-alpha.37", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.37.tgz", - "integrity": "sha512-4GX7Baw+/lwS4PPW/UJNY89tWSvYG1DL6baKVdpK6mC593iRgMssxNtORMTFArLPJ/A/lzsGhRmx+z6MaMxj0Q==" + "version": "0.2.0-alpha.43", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", + "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==" }, "inflight": { "version": "1.0.6", @@ -21183,9 +21594,9 @@ "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" }, "inquirer": { - "version": "8.2.4", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.4.tgz", - "integrity": "sha512-nn4F01dxU8VeKfq192IjLsxu0/OmMZ4Lg3xKAns148rCaXP6ntAoEkVYZThWjwON8AlzdZZi6oqnhNbxUG9hVg==", + "version": "8.2.5", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.5.tgz", + "integrity": "sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ==", "dev": true, "requires": { "ansi-escapes": "^4.2.1", @@ -21532,6 +21943,69 @@ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==" }, + "jest-util": { + "version": "29.6.1", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.6.1.tgz", + "integrity": "sha512-NRFCcjc+/uO3ijUVyNOQJluf8PtGCe/W6cix36+M3cTFgiYqFOOW5MgN4JOOcvbUhcKTYVd1CvHz/LWi8d16Mg==", + "requires": { + "@jest/types": "^29.6.1", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, 
+ "ci-info": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", + "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==" + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, "jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", @@ -21557,6 +22031,11 @@ } } }, + "jiti": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.19.1.tgz", + "integrity": "sha512-oVhqoRDaBXf7sjkll95LHVS6Myyyb1zaunVwk4Z0+WPSW4gjS0pl01zYKHScTuyEhQsFxV5L4DR5r+YqSyqyyg==" + }, "joi": { "version": "17.6.0", "resolved": "https://registry.npmjs.org/joi/-/joi-17.6.0.tgz", @@ -21603,9 +22082,9 @@ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, "json5": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz", - "integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==" + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==" }, "jsonfile": { "version": "6.1.0", @@ -21634,11 +22113,6 @@ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==" }, - "klona": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.5.tgz", - "integrity": "sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ==" - }, "latest-version": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", @@ -21662,15 +22136,20 @@ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" }, + "load-script": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", + "integrity": "sha512-kPEjMFtZvwL9TaZo0uZ2ml+Ye9HUMmPwbYRJ324qF9tqMejwykJ5ggTyvzmrbBeapCAbk98BSbTeovHEEP1uCA==" + }, "loader-runner": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", "integrity": 
"sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==" }, "loader-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.2.tgz", - "integrity": "sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A==", + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", "requires": { "big.js": "^5.2.2", "emojis-list": "^3.0.0", @@ -21817,6 +22296,11 @@ "yallist": "^4.0.0" } }, + "lunr-languages": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", + "integrity": "sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==" + }, "make-dir": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", @@ -21826,9 +22310,9 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, @@ -21839,6 +22323,11 @@ "dev": true, "optional": true }, + "mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==" + }, "markdown-escapes": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz", @@ -21903,6 +22392,11 @@ "fs-monkey": "^1.0.3" } }, + "memoize-one": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-5.2.1.tgz", + "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==" + }, "merge": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/merge/-/merge-2.1.1.tgz", @@ -22010,9 +22504,9 @@ } }, "minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==" }, "minipass": { "version": "3.3.4", @@ -22057,9 +22551,9 @@ "dev": true }, "nanoid": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz", - "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==" + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==" }, "negotiator": { "version": "0.6.3", @@ -22094,9 +22588,9 @@ } }, "node-fetch": { - "version": "2.6.7", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", - "integrity": 
"sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.12.tgz", + "integrity": "sha512-C/fGU2E8ToujUivIO0H+tpQ6HWo4eEmchoPIoXtxCrVghxdKq+QOHqEZW7tuP3KlV3bC8FRMO5nMCC7Zm1VP6g==", "requires": { "whatwg-url": "^5.0.0" } @@ -22107,9 +22601,9 @@ "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==" }, "node-releases": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.6.tgz", - "integrity": "sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==" + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", + "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==" }, "nodejieba": { "version": "2.6.0", @@ -22181,9 +22675,9 @@ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" }, "object-inspect": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", - "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==" + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==" }, "object-keys": { "version": "1.1.1", @@ -22379,9 +22873,9 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, @@ -22565,11 +23059,11 @@ } }, "postcss": { - "version": "8.4.16", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.16.tgz", - "integrity": "sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "requires": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } @@ -22584,22 +23078,22 @@ } }, "postcss-colormin": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.0.tgz", - "integrity": "sha512-WdDO4gOFG2Z8n4P8TWBpshnL3JpmNmJwdnfP2gbk2qBA8PWwOYcmjmI/t3CmMeL72a7Hkd+x/Mg9O2/0rD54Pg==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", + "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", "requires": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "caniuse-api": "^3.0.0", "colord": "^2.9.1", "postcss-value-parser": "^4.2.0" } }, "postcss-convert-values": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.2.tgz", - "integrity": 
"sha512-c6Hzc4GAv95B7suy4udszX9Zy4ETyMCgFPUDtWjdFTKH1SE9eFY/jEpHSwTH1QPuwxHpWslhckUQWbNRM4ho5g==", + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", + "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", "requires": { - "browserslist": "^4.20.3", + "browserslist": "^4.21.4", "postcss-value-parser": "^4.2.0" } }, @@ -22636,13 +23130,26 @@ } }, "postcss-loader": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-6.2.1.tgz", - "integrity": "sha512-WbbYpmAaKcux/P66bZ40bpWsBucjx/TTgVVzRZ9yUO8yQfVBlameJ0ZGVaPfH64hNSBh63a+ICP5nqOpBA0w+Q==", + "version": "7.3.3", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.3.tgz", + "integrity": "sha512-YgO/yhtevGO/vJePCQmTxiaEwER94LABZN0ZMT4A0vsak9TpO+RvKRs7EmJ8peIlB9xfXCsS7M8LjqncsUZ5HA==", "requires": { - "cosmiconfig": "^7.0.0", - "klona": "^2.0.5", - "semver": "^7.3.5" + "cosmiconfig": "^8.2.0", + "jiti": "^1.18.2", + "semver": "^7.3.8" + }, + "dependencies": { + "cosmiconfig": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz", + "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==", + "requires": { + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0" + } + } } }, "postcss-merge-idents": { @@ -22655,20 +23162,20 @@ } }, "postcss-merge-longhand": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.6.tgz", - "integrity": "sha512-6C/UGF/3T5OE2CEbOuX7iNO63dnvqhGZeUnKkDeifebY0XqkkvrctYSZurpNE902LDf2yKwwPFgotnfSoPhQiw==", + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", + "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", "requires": { "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.0" + "stylehacks": "^5.1.1" } }, "postcss-merge-rules": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.2.tgz", - "integrity": "sha512-zKMUlnw+zYCWoPN6yhPjtcEdlJaMUZ0WyVcxTAmw3lkkN/NDMRkOkiuctQEoWAOvH7twaxUUdvBWl0d4+hifRQ==", + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", + "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", "requires": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "caniuse-api": "^3.0.0", "cssnano-utils": "^3.1.0", "postcss-selector-parser": "^6.0.5" @@ -22693,11 +23200,11 @@ } }, "postcss-minify-params": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.3.tgz", - "integrity": "sha512-bkzpWcjykkqIujNL+EVEPOlLYi/eZ050oImVtHU7b4lFS82jPnsCb44gvC6pxaNt38Els3jWYDHTjHKf0koTgg==", + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", + "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", "requires": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "cssnano-utils": "^3.1.0", "postcss-value-parser": "^4.2.0" } @@ -22789,11 +23296,11 @@ } }, "postcss-normalize-unicode": { - "version": "5.1.0", - 
"resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.0.tgz", - "integrity": "sha512-J6M3MizAAZ2dOdSjy2caayJLQT8E8K9XjLce8AUQMwOrCvjCHv24aLC/Lps1R1ylOfol5VIDMaM/Lo9NGlk1SQ==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", + "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", "requires": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "postcss-value-parser": "^4.2.0" } }, @@ -22832,11 +23339,11 @@ } }, "postcss-reduce-initial": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.0.tgz", - "integrity": "sha512-5OgTUviz0aeH6MtBjHfbr57tml13PuedK/Ecg8szzd4XRMbYxH4572JFG067z+FqBIf6Zp/d+0581glkvvWMFw==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", + "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", "requires": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "caniuse-api": "^3.0.0" } }, @@ -22858,9 +23365,9 @@ } }, "postcss-sort-media-queries": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.3.0.tgz", - "integrity": "sha512-jAl8gJM2DvuIJiI9sL1CuiHtKM4s5aEIomkU8G3LFvbP+p8i7Sz8VV63uieTgoewGqKbi+hxBTiOKJlB35upCg==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", "requires": { "sort-css-media-queries": "2.1.0" } @@ -22893,6 +23400,11 @@ "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", "requires": {} }, + "preact": { + "version": "10.22.0", + "resolved": "https://registry.npmjs.org/preact/-/preact-10.22.0.tgz", + "integrity": "sha512-RRurnSjJPj4rp5K6XoP45Ui33ncb7e4H7WiOHVpjbkvqvA3U+N8Z6Qbo0AE6leGYBV66n8EhEaFixvIu3SkxFw==" + }, "prepend-http": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", @@ -23007,18 +23519,13 @@ "integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==" }, "qs": { - "version": "6.10.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.3.tgz", - "integrity": "sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==", + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", "requires": { "side-channel": "^1.0.4" } }, - "querystring": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.1.tgz", - "integrity": "sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg==" - }, "queue": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", @@ -23046,9 +23553,9 @@ "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==" }, "raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": 
"sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "requires": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -23182,9 +23689,9 @@ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" }, "loader-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.0.tgz", - "integrity": "sha512-HVl9ZqccQihZ7JM85dco1MvO9G+ONvxoGa9rkhzFsneGLKSUg1gJf9bWzhRhcvm2qChhWpebQhP44qxjKIUCaQ==" + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz", + "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==" }, "locate-path": { "version": "6.0.0", @@ -23240,6 +23747,14 @@ "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.0.tgz", "integrity": "sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==" }, + "react-github-btn": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/react-github-btn/-/react-github-btn-1.4.0.tgz", + "integrity": "sha512-lV4FYClAfjWnBfv0iNlJUGhamDgIq6TayD0kPZED6VzHWdpcHmPfsYOZ/CFwLfPv4Zp+F4m8QKTj0oy2HjiGXg==", + "requires": { + "github-buttons": "^2.22.0" + } + }, "react-helmet-async": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", @@ -23252,6 +23767,12 @@ "shallowequal": "^1.1.0" } }, + "react-icons": { + "version": "4.9.0", + "resolved": "https://registry.npmmirror.com/react-icons/-/react-icons-4.9.0.tgz", + "integrity": "sha512-ijUnFr//ycebOqujtqtV9PFS7JjhWg0QU6ykURVHuL4cbofvRCf3f6GMn9+fBktEFQOIVZnuAYLZdiyadRQRFg==", + "requires": {} + }, "react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", @@ -23274,12 +23795,12 @@ "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==" }, "react-loadable": { - "version": "npm:@docusaurus/react-loadable@5.5.2", - "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", - "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/react-loadable/-/react-loadable-5.5.0.tgz", + "integrity": "sha512-C8Aui0ZpMd4KokxRdVAm2bQtI03k2RMRNzOB+IipV3yxFTSVICv7WoUr5L9ALB5BmKO1iHgZtWM8EvYG83otdg==", + "peer": true, "requires": { - "@types/react": "*", - "prop-types": "^15.6.2" + "prop-types": "^15.5.0" } }, "react-loadable-ssr-addon-v5-slorber": { @@ -23290,6 +23811,18 @@ "@babel/runtime": "^7.10.3" } }, + "react-player": { + "version": "2.16.0", + "resolved": "https://registry.npmjs.org/react-player/-/react-player-2.16.0.tgz", + "integrity": "sha512-mAIPHfioD7yxO0GNYVFD1303QFtI3lyyQZLY229UEAp/a10cSW+hPcakg0Keq8uWJxT2OiT/4Gt+Lc9bD6bJmQ==", + "requires": { + "deepmerge": "^4.0.0", + "load-script": "^1.0.0", + "memoize-one": "^5.1.1", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.0.1" + } + }, "react-router": { "version": "5.3.3", "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.3.tgz", @@ -23330,11 +23863,11 @@ } }, "react-textarea-autosize": { - "version": 
"8.3.4", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.3.4.tgz", - "integrity": "sha512-CdtmP8Dc19xL8/R6sWvtknD/eCXkQr30dtvC4VmGInhRsfF8X/ihXCq6+9l9qbxmKRiq407/7z5fxE7cVWQNgQ==", + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", + "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", "requires": { - "@babel/runtime": "^7.10.2", + "@babel/runtime": "^7.20.13", "use-composed-ref": "^1.3.0", "use-latest": "^1.2.1" } @@ -23371,21 +23904,11 @@ } }, "recursive-readdir": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.2.tgz", - "integrity": "sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", "requires": { - "minimatch": "3.0.4" - }, - "dependencies": { - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "requires": { - "brace-expansion": "^1.1.7" - } - } + "minimatch": "^3.0.5" } }, "regenerate": { @@ -23402,9 +23925,9 @@ } }, "regenerator-runtime": { - "version": "0.13.9", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", - "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" }, "regenerator-transform": { "version": "0.15.0", @@ -23463,75 +23986,11 @@ } } }, - "rehype-parse": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/rehype-parse/-/rehype-parse-6.0.2.tgz", - "integrity": "sha512-0S3CpvpTAgGmnz8kiCyFLGuW5yA4OQhyNTm/nwPopZ7+PI11WnGl1TTWTGv/2hPEe/g2jRLlhVVSsoDH8waRug==", - "requires": { - "hast-util-from-parse5": "^5.0.0", - "parse5": "^5.0.0", - "xtend": "^4.0.0" - }, - "dependencies": { - "hast-util-from-parse5": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-5.0.3.tgz", - "integrity": "sha512-gOc8UB99F6eWVWFtM9jUikjN7QkWxB3nY0df5Z0Zq1/Nkwl5V4hAAsl0tmwlgWl/1shlTF8DnNYLO8X6wRV9pA==", - "requires": { - "ccount": "^1.0.3", - "hastscript": "^5.0.0", - "property-information": "^5.0.0", - "web-namespaces": "^1.1.2", - "xtend": "^4.0.1" - } - }, - "hastscript": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-5.1.2.tgz", - "integrity": "sha512-WlztFuK+Lrvi3EggsqOkQ52rKbxkXL3RwB6t5lwoa8QLMemoWfBuL43eDrwOamJyR7uKQKdmKYaBH1NZBiIRrQ==", - "requires": { - "comma-separated-tokens": "^1.0.0", - "hast-util-parse-selector": "^2.0.0", - "property-information": "^5.0.0", - "space-separated-tokens": "^1.0.0" - } - }, - "parse5": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz", - "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==" - } - } - }, "relateurl": { "version": 
"0.2.7", "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==" }, - "remark-admonitions": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/remark-admonitions/-/remark-admonitions-1.2.1.tgz", - "integrity": "sha512-Ji6p68VDvD+H1oS95Fdx9Ar5WA2wcDA4kwrrhVU7fGctC6+d3uiMICu7w7/2Xld+lnU7/gi+432+rRbup5S8ow==", - "requires": { - "rehype-parse": "^6.0.2", - "unified": "^8.4.2", - "unist-util-visit": "^2.0.1" - }, - "dependencies": { - "unified": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/unified/-/unified-8.4.2.tgz", - "integrity": "sha512-JCrmN13jI4+h9UAyKEoGcDZV+i1E7BLFuG7OsaDvTXI5P0qhHX+vZO/kOhz9jn8HGENDKbwSeB0nVOg4gVStGA==", - "requires": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-plain-obj": "^2.0.0", - "trough": "^1.0.0", - "vfile": "^4.0.0" - } - } - } - }, "remark-emoji": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz", @@ -23614,14 +24073,27 @@ } }, "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==" }, "source-map": { "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==" + }, + "unified": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", + "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + } } } }, @@ -23950,9 +24422,9 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==" }, "scheduler": { "version": "0.20.2", @@ -23997,6 +24469,12 @@ } } }, + "search-insights": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.14.0.tgz", + "integrity": "sha512-OLN6MsPMCghDOqlCtsIsYgtsC0pnwVTyT9Mu6A3ewOj1DxvzZF6COrn2g86E/c05xbktB0XN04m/t1Z+n+fTGw==", + "peer": true + }, "section-matter": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", @@ -24020,9 +24498,9 @@ } }, "semver": { - "version": "7.3.7", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz", - "integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": 
"sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "requires": { "lru-cache": "^6.0.0" } @@ -24036,9 +24514,9 @@ }, "dependencies": { "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==" } } }, @@ -24098,28 +24576,20 @@ } }, "serve-handler": { - "version": "6.1.3", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.3.tgz", - "integrity": "sha512-FosMqFBNrLyeiIDvP1zgO6YoTzFYHxLDEIavhlmQ+knB2Z7l1t+kGLHkZIDN7UVWqQAmKI3D20A6F6jo3nDd4w==", + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", "requires": { "bytes": "3.0.0", "content-disposition": "0.5.2", "fast-url-parser": "1.1.3", "mime-types": "2.1.18", - "minimatch": "3.0.4", + "minimatch": "3.1.2", "path-is-inside": "1.0.2", "path-to-regexp": "2.2.1", "range-parser": "1.2.0" }, "dependencies": { - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "requires": { - "brace-expansion": "^1.1.7" - } - }, "path-to-regexp": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", @@ -24203,6 +24673,19 @@ "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" }, + "set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + } + }, "setimmediate": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", @@ -24255,13 +24738,14 @@ } }, "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" } }, "signal-exit": { @@ -24285,9 +24769,9 @@ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" }, "sitemap": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", - "integrity": 
"sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", + "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", "requires": { "@types/node": "^17.0.5", "@types/sax": "^1.2.1", @@ -24484,11 +24968,11 @@ } }, "stylehacks": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.0.tgz", - "integrity": "sha512-SzLmvHQTrIWfSgljkQCw2++C9+Ne91d/6Sp92I8c5uHTcy/PgeHamwITIbBW9wnFTY/3ZfSXR9HIL6Ikqmcu6Q==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", + "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", "requires": { - "browserslist": "^4.16.6", + "browserslist": "^4.21.4", "postcss-selector-parser": "^6.0.4" } }, @@ -24582,18 +25066,23 @@ "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==" }, "tar": { - "version": "6.1.11", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", - "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", "requires": { "chownr": "^2.0.0", "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", + "minipass": "^5.0.0", "minizlib": "^2.1.1", "mkdirp": "^1.0.3", "yallist": "^4.0.0" }, "dependencies": { + "minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==" + }, "mkdirp": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", @@ -24785,9 +25274,9 @@ } }, "tslib": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", - "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" }, "type-fest": { "version": "2.19.0", @@ -24818,6 +25307,11 @@ } } }, + "typed.js": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/typed.js/-/typed.js-2.1.0.tgz", + "integrity": "sha512-bDuXEf7YcaKN4g08NMTUM6G90XU25CK3bh6U0THC/Mod/QPKlEt9g/EjvbYB8x2Qwr2p6J6I3NrsoYaVnY6wsQ==" + }, "typedarray-to-buffer": { "version": "3.1.5", "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", @@ -24832,9 +25326,9 @@ "integrity": "sha512-goMHfm00nWPa8UvR/CPSvykqf6dVV8x/dp0c5mFTMTIu0u0FlGWRioyy7Nn0PGAdHxpJZnuO/ut+PpQ8UiHAig==" }, "ua-parser-js": { - "version": "0.7.31", - "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.31.tgz", - "integrity": "sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ==" + "version": "1.0.38", + "resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-1.0.38.tgz", + "integrity": "sha512-Aq5ppTOfvrCMgAPneW1HfWj66Xi7XL+/mIy996R1/CLS/rcyJQm6QZdsKrUeivDFQ+Oc9Wyuwor8Ze8peEoUoQ==" }, "unherit": { "version": "1.1.3", @@ 
-24870,9 +25364,9 @@ "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==" }, "unified": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz", - "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==", + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", + "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", "requires": { "bail": "^1.0.0", "extend": "^3.0.0", @@ -24964,9 +25458,9 @@ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==" }, "update-browserslist-db": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.9.tgz", - "integrity": "sha512-/xsqn21EGVdXI3EXSum1Yckj3ZVZugqyOZQ/CxYPBD/R+ko9NSUScf8tFF4dOKY+2pvSSJA/S+5B8s4Zr4kyvg==", + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", + "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", "requires": { "escalade": "^3.1.1", "picocolors": "^1.0.0" @@ -25196,6 +25690,12 @@ "use-isomorphic-layout-effect": "^1.1.1" } }, + "use-sync-external-store": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz", + "integrity": "sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==", + "requires": {} + }, "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -25312,9 +25812,9 @@ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" }, "webpack": { - "version": "5.74.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.74.0.tgz", - "integrity": "sha512-A2InDwnhhGN4LYctJj6M1JEaGL7Luj6LOmyBHjcI8529cm5p6VXiTIW2sn6ffvEAKmveLzvu4jrihwXtPojlAA==", + "version": "5.76.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.76.1.tgz", + "integrity": "sha512-4+YIK4Abzv8172/SGqObnUjaIHjLEuUasz9EwQj/9xmPPkYJy2Mh03Q/lJfSD3YLzbxy5FeTq5Uw0323Oh6SJQ==", "requires": { "@types/eslint-scope": "^3.7.3", "@types/estree": "^0.0.51", @@ -25456,9 +25956,9 @@ } }, "webpack-dev-middleware": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", "requires": { "colorette": "^2.0.10", "memfs": "^3.4.3", @@ -25546,9 +26046,9 @@ } }, "ws": { - "version": "8.8.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.8.1.tgz", - "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "requires": {} } } @@ -25694,9 +26194,9 @@ "integrity": 
"sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==" }, "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", "dev": true }, "wrap-ansi": { @@ -25746,9 +26246,9 @@ } }, "ws": { - "version": "7.5.9", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", - "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", "requires": {} }, "xdg-basedir": { diff --git a/package.json b/package.json index 91e997aa..9a34c6d8 100644 --- a/package.json +++ b/package.json @@ -14,14 +14,17 @@ "write-heading-ids": "docusaurus write-heading-ids" }, "dependencies": { - "@docusaurus/core": "2.0.0-beta.17", - "@docusaurus/plugin-content-docs": "2.0.0-beta.17", - "@docusaurus/preset-classic": "2.0.0-beta.17", - "@docusaurus/theme-search-algolia": "^2.0.0-beta.20", + "@cmfcmf/docusaurus-search-local": "^1.2.0", + "@docusaurus/core": "^2.4.1", + "@docusaurus/plugin-content-docs": "^2.4.1", + "@docusaurus/preset-classic": "^2.4.1", + "@docusaurus/theme-search-algolia": "^2.4.1", "@mdx-js/react": "^1.6.21", + "@node-rs/jieba": "^1.10.3", "@svgr/webpack": "^6.3.1", "clsx": "^1.1.1", "file-loader": "^6.2.0", + "flat-color-icons": "^1.1.0", "hast-util-is-element": "^1.1.0", "minimist": "^1.2.6", "nodejieba": "^2.6.0", @@ -29,10 +32,14 @@ "prism-react-renderer": "^1.2.1", "react": "^17.0.1", "react-dom": "^17.0.1", + "react-github-btn": "^1.4.0", + "react-icons": "^4.9.0", + "react-player": "^2.16.0", "remark-math": "^3.0.1", + "throttle-debounce": "3.0.1", "trim": "0.0.3", - "url-loader": "^4.1.1", - "throttle-debounce": "3.0.1" + "typed.js": "^2.1.0", + "url-loader": "^4.1.1" }, "browserslist": { "production": [ diff --git a/sidebars.js b/sidebars.js deleted file mode 100644 index d523464b..00000000 --- a/sidebars.js +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Creating a sidebar enables you to: - - create an ordered group of docs - - render a sidebar for each doc of that group - - provide next/previous navigation - - The sidebars can be generated from the filesystem, or explicitly defined here. - - Create as many sidebars as you want. 
- */ - -// @ts-check - -/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ -const sidebars = { - // By default, Docusaurus generates a sidebar from the docs folder structure - docs: [{type: 'autogenerated', dirName: '.'}], - - user_docs: [ - { - type: 'autogenerated', - dirName: 'user_docs', - }, - ], - reference: [ - { - type: 'autogenerated', - dirName: 'reference', - }, - ], - develop: [ - { - type: 'autogenerated', - dirName: 'develop', - }, - ], - governance: [ - { - type: 'autogenerated', - dirName: 'governance', - }, - { - type: 'link', - href: '/changelog', - label: 'changelog', - }, - ], - events: [ - { - type: 'autogenerated', - dirName: 'events', - }, - ], -}; - -module.exports = sidebars; diff --git a/sidebars/community.js b/sidebars/community.js new file mode 100644 index 00000000..3ca7c363 --- /dev/null +++ b/sidebars/community.js @@ -0,0 +1,25 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + community: [ + { + type: 'autogenerated', + dirName: '.', + }, + ], +}; + +module.exports = sidebars; diff --git a/sidebars/ctrlmesh.js b/sidebars/ctrlmesh.js new file mode 100644 index 00000000..00df66a5 --- /dev/null +++ b/sidebars/ctrlmesh.js @@ -0,0 +1,24 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + ctrlmesh: [ + { + type: 'autogenerated', + dirName: '.', + }, + ], +}; + +module.exports = sidebars; diff --git a/sidebars/karpor.js b/sidebars/karpor.js new file mode 100644 index 00000000..fa45274e --- /dev/null +++ b/sidebars/karpor.js @@ -0,0 +1,24 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + karpor: [ + { + type: "autogenerated", + dirName: ".", + }, + ], +}; + +module.exports = sidebars; diff --git a/sidebars/kuperator.js b/sidebars/kuperator.js new file mode 100644 index 00000000..a665dedd --- /dev/null +++ b/sidebars/kuperator.js @@ -0,0 +1,25 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. 
+ */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + operating: [ + { + type: 'autogenerated', + dirName: '.', + }, + ], +}; + +module.exports = sidebars; diff --git a/sidebars/kusion.js b/sidebars/kusion.js new file mode 100644 index 00000000..e6b15c03 --- /dev/null +++ b/sidebars/kusion.js @@ -0,0 +1,26 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + + kusion: [ + { + type: 'autogenerated', + dirName: '.', + }, + ], +}; + +module.exports = sidebars; diff --git a/src/components/DocsRating.css b/src/components/DocsRating.css new file mode 100644 index 00000000..5ece4b6e --- /dev/null +++ b/src/components/DocsRating.css @@ -0,0 +1,60 @@ +/* Docs Rating Box */ +.docsRatingContainer { + display: flex; + justify-content: center; +} + +.docsRating { + display: flex; + align-items: center; + justify-content: center; + width: 337px; + height: 75px; + padding: 16px 40px 16px 40px; + border: solid; + border-radius: 8px !important; + min-height: 66px; + margin-top: 42px; + text-align: center; + color: #057594; + line-height: 32px; + font-weight: 500; + border-radius: 0 var(--ifm-global-radius) var(--ifm-global-radius) 0; +} + +.docsRating svg { + height: 2.5em; + width: 2.5em; + + &:hover, + &:focus { + cursor: pointer; + fill: var(--ifm-color-primary); + } +} + +.docsRating .i_thumbsup { + fill: #56a211; + margin-left: 16px; + cursor: pointer; + /* transform: translateY(0.25em); */ +} + +.docsRating .i_thumbsdown { + fill: #e9430f; + margin-left: 10px; + cursor: pointer; + /* transform: scale(-1, -1) translateY(-0.25em); */ +} + +html[data-theme="dark"] .docsRating { + color: #cee9f3; +} + +@media only screen and (max-width: 996px) { + .docsRating { + width: 100%; + margin: 36px 0 0 !important; + border-radius: var(--ifm-global-radius); + } +} diff --git a/src/components/DocsRating.js b/src/components/DocsRating.js new file mode 100644 index 00000000..bd7f8241 --- /dev/null +++ b/src/components/DocsRating.js @@ -0,0 +1,131 @@ +import React, { useState } from "react"; +import useIsBrowser from "@docusaurus/useIsBrowser"; +import "./DocsRating.css"; + +const DocsRating = ({ label }) => { + const isBrowser = useIsBrowser(); + const [haveVoted, setHaveVoted] = useState(false); + if (!isBrowser) { + return null; + } + + const giveFeedback = (value) => { + if (window.gtag) { + console.info("Google Analytics 4 API is available."); + window.gtag("event", "feedback", { + event_category: "button", + event_label: label, + value: value, + }); + } else { + console.warn("Google Analytics 4 API is not available."); + } + setHaveVoted(true); + }; + + return ( +
+
+ {haveVoted ? ( + "Thanks for letting us know!" + ) : ( + <> + Is this page useful? + giveFeedback(1)} + xmlns="http://www.w3.org/2000/svg" + viewBox="0 0 297 297" + > + + + + + + + + + + + + + + + + + + giveFeedback(0)} + xmlns="http://www.w3.org/2000/svg" + viewBox="0 0 297 297" + > + + + + + + + + + + + + + + + + + + + )} +
+
+ ); +}; + +export default DocsRating; diff --git a/src/components/ExampleScroller/index.tsx b/src/components/ExampleScroller/index.tsx index bd6991a6..8d666d8b 100644 --- a/src/components/ExampleScroller/index.tsx +++ b/src/components/ExampleScroller/index.tsx @@ -113,24 +113,6 @@ export const ExampleScroller = () => { seCss["section--showcase"], )} > -

- Codify Your Modern Delivery -

- -

- With configs, models, functions and rules -

-
{ + // Fetch the gitHub star number + fetch(`https://api.github.com/repos/${repo}`) + .then((response) => response.json()) + .then((data) => { + if (data.stargazers_count) { + setStars(data.stargazers_count); + } + }); + }, [repo]); + + return ( + + ); +} + +export default GithubStars; diff --git a/src/components/GithubStars.module.css b/src/components/GithubStars.module.css new file mode 100644 index 00000000..5354bdc5 --- /dev/null +++ b/src/components/GithubStars.module.css @@ -0,0 +1,51 @@ +.karpor_star_button { + display: flex; + align-items: center; + margin: 0 10px; + font-size: 14px; +} +.karpor_star_button a { + display: inline-block; + align-items: center; + cursor: pointer; + color: rgb(36, 41, 47); + font-weight: bold; + display: flex; + align-items: center; + text-decoration: none; +} + +.karpor_star_button a .text { + display: flex; + align-items: center; + padding: 5px 10px; + border-radius: 6px 0 0 6px; + border: 1px solid rgb(208, 215, 222); + background: #ebf0f4; +} + +.karpor_star_button a .icon { + margin-right: 5px; +} + +.karpor_star_button a .star { + padding: 5px 10px; + border-radius: 0 4px 4px 0; + border: 1px solid rgb(208, 215, 222); + border-left: none; + background: #fff; +} + +.karpor_star_button a:hover { + text-decoration: none; +} + +.karpor_star_button a .text:hover { + text-decoration: none; + background-color: #fff; + color: rgb(36, 41, 47); +} +.karpor_star_button a .star:hover { + text-decoration: none; + color: #1677ff; +} diff --git a/src/components/HomepageFeatures.js b/src/components/HomepageFeatures.js deleted file mode 100644 index c384c55c..00000000 --- a/src/components/HomepageFeatures.js +++ /dev/null @@ -1,62 +0,0 @@ -import React from 'react'; -import clsx from 'clsx'; -import styles from './HomepageFeatures.module.css'; -import Translate, {translate} from '@docusaurus/Translate'; - -const FeatureList = [ - { - title: translate({'id': 'homepage.feature.kcl', 'message': 'KCL 语言'}), - Svg: require('../../static/img/undraw_docusaurus_mountain.svg').default, - description: ( - - 面向配置和策略场景的基于约束的记录及函数语言 - - ), - }, - { - title: translate({'id': 'homepage.feature.konfig', 'message': 'Konfig'}), - Svg: require('../../static/img/undraw_docusaurus_tree.svg').default, - description: ( - - Konfig 是一个配置大库,存放了用户用 KCL 描述的运维意图。它提供给用户开箱即用的云原生应用配置模型,方便用户快速开始云原生应用发布之旅。 - - ), - }, - { - title: translate({'id': 'homepage.feature.kusion', 'message': 'Kusion 引擎'}), - Svg: require('../../static/img/undraw_docusaurus_react.svg').default, - description: ( - - Kusion 引擎编译、实施 Konfig 中描述的运维意图,屏蔽基础设施复杂性,为多运行时、多云提供一致的运维体验。 - - ), - }, -]; - -function Feature({Svg, title, description}) { - return ( -
-
- -
-
-

{title}

-

{description}

-
-
- ); -} - -export default function HomepageFeatures() { - return ( -
-
-
- {FeatureList.map((props, idx) => ( - - ))} -
-
-
- ); -} diff --git a/src/components/KarporButton/index.js b/src/components/KarporButton/index.js new file mode 100644 index 00000000..2aed32e6 --- /dev/null +++ b/src/components/KarporButton/index.js @@ -0,0 +1,16 @@ +import React from "react"; + +import styles from "./style.module.css"; + +function KarporButton({ url, btnText }) { + return ( + + ); +} + +export default KarporButton; diff --git a/src/components/KarporButton/style.module.css b/src/components/KarporButton/style.module.css new file mode 100644 index 00000000..dc3ed79d --- /dev/null +++ b/src/components/KarporButton/style.module.css @@ -0,0 +1,38 @@ +.karpor_button a { + display: inline-block; + margin: 0 10px; + font-size: 14px; + border-radius: 4px; + cursor: pointer; + color: rgb(36, 41, 47); + font-weight: bold; + display: flex; + align-items: center; + text-decoration: none; +} + +.karpor_button a .text { + text-align: center; + padding: 5px 10px; + border-radius: 6px 0 0 6px; + border: 1px solid rgb(208, 215, 222); + background: #ebf0f4; +} + +.karpor_button a .icon { + padding: 5px 10px; + border-radius: 0 4px 4px 0; + border: 1px solid rgb(208, 215, 222); + border-left: none; + background: #fff; +} + +.karpor_button a:hover { + text-decoration: none; +} + +.karpor_button a .text:hover { + text-decoration: none; + background-color: #fff; + color: rgb(36, 41, 47); +} diff --git a/src/css/custom.css b/src/css/custom.css index 0159a23b..d2300496 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -5,88 +5,97 @@ * LICENSE file in the root directory of this source tree. */ - :root { - --palette-charade: #21222c; - --palette-gray: #32343e; - --palette-green: #66bb6a; - --palette-midnight: #141725; - --palette-pale-blue: #b1b5d3; - --palette-pink: #d14671; - --palette-pink-darker: #be2f5b; - --palette-rock: rgb(var(--palette-rock-raw)); - --palette-rock-raw: 38, 40, 51; - --palette-sky: #81d3f9; - --palette-turquoise: #0cc0df; - --palette-black-10: rgba(0, 0, 0, 0.1); - --palette-white: #fff; - --palette-white-03: rgba(255, 255, 255, 0.03); - --palette-white-05: rgba(255, 255, 255, 0.05); - --palette-white-10: rgba(255, 255, 255, 0.1); - --palette-white-20: rgba(255, 255, 255, 0.2); - --palette-white-30: rgba(255, 255, 255, 0.3); - --palette-white-40: rgba(255, 255, 255, 0.4); - --palette-white-60: rgba(255, 255, 255, 0.6); - --palette-white-darker: #d9d9d9; - --palette-yellow: #ffd54f; - --image-shadow: 0 15px 61px 5px rgba(20, 23, 37, 0.94); - - --font-size-small: 15px; - --font-size-normal: 16px; - --font-size-large: 17px; - --font-size-big-1: 22px; - --font-size-big-2: 24px; - --font-size-big-3: 32px; - --font-size-big-4: 46px; - --font-size-big-5: 64px; + @font-face { + font-family: Segoe UI; + src: + url("/static/fonts/Segoe-UI.ttf") format("truetype") +} + +:root { + --palette-charade: #21222c; + --palette-gray: #32343e; + --palette-green: #66bb6a; + --palette-midnight: #141725; + --palette-pale-blue: #b1b5d3; + --palette-pink: #d14671; + --palette-pink-darker: #be2f5b; + --palette-rock: rgb(var(--palette-rock-raw)); + --palette-rock-raw: 38, 40, 51; + --palette-sky: #81d3f9; + --palette-turquoise: #0cc0df; + --palette-black-10: rgba(0, 0, 0, 0.1); + --palette-white: #fff; + --palette-white-03: rgba(255, 255, 255, 0.03); + --palette-white-05: rgba(255, 255, 255, 0.05); + --palette-white-10: rgba(255, 255, 255, 0.1); + --palette-white-20: rgba(255, 255, 255, 0.2); + --palette-white-30: rgba(255, 255, 255, 0.3); + --palette-white-40: rgba(255, 255, 255, 0.4); + --palette-white-60: rgba(255, 255, 255, 0.6); + 
--palette-white-darker: #d9d9d9; + --palette-yellow: #ffd54f; + --image-shadow: 0 15px 61px 5px rgba(20, 23, 37, 0.94); + + --font-size-small: 15px; + --font-size-normal: 16px; + --font-size-large: 17px; + --font-size-big-1: 22px; + --font-size-big-2: 24px; + --font-size-big-3: 32px; + --font-size-big-4: 46px; + --font-size-big-5: 64px; /* - See css var + hsl color palette technique: - https://blog.maximeheckel.com/posts/the-power-of-composition-with-css-variables/ - */ - --site-primary-hue-saturation: 167 68%; - --site-primary-hue-saturation-light: 167 56%; /* do we really need this extra one? */ + See css var + hsl color palette technique: + https://blog.maximeheckel.com/posts/the-power-of-composition-with-css-variables/ + */ + --site-primary-hue-saturation: 214 68%; + --site-primary-hue-saturation-light: 214 56%; + /* do we really need this extra one? */ --site-color-favorite-background: #f6fdfd; --site-color-tooltip: #fff; --site-color-tooltip-background: #353738; --site-color-svg-icon-favorite: #e9669e; --site-color-checkbox-checked-bg: hsl(167deg 56% 73% / 25%); --site-color-feedback-background: #fff; + --primary-color: #1677ff; } html[data-theme='dark'] { + --feature-icon-color: #ffffff; --site-color-feedback-background: #f0f8ff; --site-color-favorite-background: #1d1e1e; --site-color-checkbox-checked-bg: hsl(167deg 56% 73% / 10%); } [data-theme='light'] { - --ifm-color-primary: hsl(var(--site-primary-hue-saturation) 30%); - --ifm-color-primary-dark: hsl(var(--site-primary-hue-saturation) 26%); - --ifm-color-primary-darker: hsl(var(--site-primary-hue-saturation) 23%); - --ifm-color-primary-darkest: hsl(var(--site-primary-hue-saturation) 17%); + --feature-icon-color: #000000; + --ifm-color-primary: var(--primary-color); + --ifm-color-secondary: #ffffff; + --ifm-color-primary-dark: var(--primary-color); + --ifm-color-primary-darker: var(--primary-color); + --ifm-color-primary-darkest: var(--primary-color); + --ifm-color-primary-light: var(--primary-color); + --ifm-color-primary-lighter: var(--primary-color); + --ifm-color-primary-lightest: var(--primary-color); + --ifm-code-font-size: 95%; + --ifm-alert-color: #0d3c61; + + --ifm-navbar-background-color: #ffffff; + + --ifm-font-family-base: "Segoe UI", "Roboto", sans-serif; - --ifm-color-primary-light: hsl(var(--site-primary-hue-saturation-light) 39%); - --ifm-color-primary-lighter: hsl( - var(--site-primary-hue-saturation-light) 47% - ); - --ifm-color-primary-lightest: hsl( - var(--site-primary-hue-saturation-light) 58% - ); + --ifm-font-size-base: 16px; } [data-theme='dark'] { - --ifm-color-primary: hsl(var(--site-primary-hue-saturation) 45%); - --ifm-color-primary-dark: hsl(var(--site-primary-hue-saturation) 41%); - --ifm-color-primary-darker: hsl(var(--site-primary-hue-saturation) 38%); - --ifm-color-primary-darkest: hsl(var(--site-primary-hue-saturation) 32%); - - --ifm-color-primary-light: hsl(var(--site-primary-hue-saturation-light) 54%); - --ifm-color-primary-lighter: hsl( - var(--site-primary-hue-saturation-light) 62% - ); - --ifm-color-primary-lightest: hsl( - var(--site-primary-hue-saturation-light) 73% - ); + --ifm-color-primary: var(--primary-color); + --ifm-color-primary-dark: var(--primary-color); + --ifm-color-primary-darker: var(--primary-color); + --ifm-color-primary-darkest: var(--primary-color); + --ifm-color-primary-light: var(--primary-color); + --ifm-color-primary-lighter: var(--primary-color); + --ifm-color-primary-lightest: var(--primary-color); } .docusaurus-highlight-code-line { @@ -100,6 +109,10 @@ 
html[data-theme='dark'] { background-color: rgb(66 66 66 / 30%); } +.navbar__title { + display: none; +} + .header-github-link:hover { opacity: 0.6; } @@ -109,13 +122,15 @@ html[data-theme='dark'] { width: 24px; height: 24px; display: flex; - background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") - no-repeat; + background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat; +} + +.left-align-ul { + --ifm-list-left-padding: 0rem; } [data-theme='dark'] .header-github-link::before { - background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='white' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") - no-repeat; + background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='white' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 
3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat; } .footer--dark { @@ -135,11 +150,11 @@ html[data-theme='dark'] { } [data-theme='light'] .themedDocusaurus [fill='#FFFF50'] { - fill: greenyellow; + fill: #1677ff; } [data-theme='dark'] .themedDocusaurus [fill='#FFFF50'] { - fill: seagreen; + fill: #1677ff; } [data-theme='light'] .DocSearch { @@ -175,31 +190,36 @@ html[data-theme='dark'] { --docsearch-hit-background: var(--ifm-color-emphasis-100); /* Footer */ --docsearch-footer-background: var(--ifm-background-surface-color); - --docsearch-key-gradient: linear-gradient( - -26.5deg, - var(--ifm-color-emphasis-200) 0%, - var(--ifm-color-emphasis-100) 100% - ); -} - -div[class^='announcementBar_'] { - --site-announcement-bar-stripe-color1: hsl( - var(--site-primary-hue-saturation) 85% - ); - --site-announcement-bar-stripe-color2: hsl( - var(--site-primary-hue-saturation) 95% - ); - background: repeating-linear-gradient( - 35deg, - var(--site-announcement-bar-stripe-color1), - var(--site-announcement-bar-stripe-color1) 20px, - var(--site-announcement-bar-stripe-color2) 10px, - var(--site-announcement-bar-stripe-color2) 40px - ); + --docsearch-key-gradient: linear-gradient(-26.5deg, + var(--ifm-color-emphasis-200) 0%, + var(--ifm-color-emphasis-100) 100%); +} + +[data-theme='dark'] div[class^='announcementBar_'] { + --site-announcement-bar-stripe-color1: hsl(var(--site-primary-hue-saturation) 15%); + --site-announcement-bar-stripe-color2: hsl(var(--site-primary-hue-saturation) 20%); + background: repeating-linear-gradient(35deg, + var(--site-announcement-bar-stripe-color1), + var(--site-announcement-bar-stripe-color1) 20px, + var(--site-announcement-bar-stripe-color2) 10px, + var(--site-announcement-bar-stripe-color2) 40px); font-weight: bold; + color: var(--ifm-font-color-base); } -.red > a { +[data-theme='light'] div[class^='announcementBar_'] { + --site-announcement-bar-stripe-color1: hsl(var(--site-primary-hue-saturation) 85%); + --site-announcement-bar-stripe-color2: hsl(var(--site-primary-hue-saturation) 95%); + background: repeating-linear-gradient(35deg, + var(--site-announcement-bar-stripe-color1), + var(--site-announcement-bar-stripe-color1) 20px, + var(--site-announcement-bar-stripe-color2) 10px, + var(--site-announcement-bar-stripe-color2) 40px); + font-weight: bold; + color: var(--ifm-font-color-base); +} + +.red>a { color: red; } @@ -244,3 +264,4 @@ div[class^='announcementBar_'] { height: 100%; } } + diff --git a/src/css/index/showcase.module.css b/src/css/index/showcase.module.css index 498ec79c..46c94e67 100644 --- a/src/css/index/showcase.module.css +++ b/src/css/index/showcase.module.css @@ -2,6 +2,7 @@ display: flex; width: 100%; padding: 3rem 2em; + max-width: 1233px; border-radius: 1rem; font-size: var(--font-size-large); background: var(--palette-rock); @@ -30,10 +31,8 @@ left: 0; right: 0; height: 118px; - background: linear-gradient( - var(--palette-rock), - rgba(var(--palette-rock-raw), 0) - ); + background: linear-gradient(var(--palette-rock), + rgba(var(--palette-rock-raw), 0)); z-index: 1; } @@ -44,10 +43,8 @@ left: 0; right: 0; height: 118px; - background: linear-gradient( - rgba(var(--palette-rock-raw), 0), - var(--palette-rock) - ); + background: linear-gradient(rgba(var(--palette-rock-raw), 0), + var(--palette-rock)); } .showcase__left :global(.prism-code) { @@ 
-87,11 +84,11 @@ transition: top 100ms cubic-bezier(0.17, 0.67, 0.83, 0.67); } -.showcase__offset > div { +.showcase__offset>div { opacity: 0.3; } -.showcase__offset > div:nth-child(even) { +.showcase__offset>div:nth-child(even) { display: none; } @@ -172,7 +169,7 @@ @media (max-width: 800px) { .showcase { - --active-font-size: var(--font-size-large); + --active-font-size: var(--font-size-normal); padding: 3rem 1rem; font-size: var(--font-size-normal); } @@ -184,10 +181,11 @@ } @media (min-width: 622px) { - .showcase__1 > div:nth-child(1), - .showcase__2 > div:nth-child(3), - .showcase__3 > div:nth-child(5), - .showcase__4 > div:nth-child(7) { + + .showcase__1>div:nth-child(1), + .showcase__2>div:nth-child(3), + .showcase__3>div:nth-child(5), + .showcase__4>div:nth-child(7) { font-size: var(--active-font-size); opacity: 1; } @@ -195,9 +193,17 @@ @media (max-width: 622px) { .showcase { - --active-font-size: var(--font-size-normal); + --active-font-size: calc(15px); padding: 0; - font-size: var(--font-size-normal); + font-size: calc(12px); + } + + .showcase__left { + max-height: 288px; + max-width: 400px; + flex: 0 0 385px; + flex-grow: 0; + flex-shrink: 1; } .showcase__inner { @@ -212,31 +218,80 @@ display: flex; } - .showcase__offset > div { + .showcase__offset>div { pointer-events: none; user-select: none; } - .showcase__offset > div:nth-child(even) { + .showcase__offset>div:nth-child(even) { display: flex; } - .showcase__offset > div:nth-child(odd) { + .showcase__offset>div:nth-child(odd) { display: none; } - .showcase__1 > div:nth-child(2), - .showcase__2 > div:nth-child(4), - .showcase__3 > div:nth-child(6), - .showcase__4 > div:nth-child(8) { + .showcase__1>div:nth-child(2), + .showcase__2>div:nth-child(4), + .showcase__3>div:nth-child(6), + .showcase__4>div:nth-child(8) { font-size: var(--active-font-size); opacity: 1; } } @media (max-width: 450px) { - .showcase :global(.prism-code.language-questdb-sql) > div { + .showcase { + --active-font-size: calc(12px); + padding: 0; + font-size: calc(10px); + } + + .showcase__left { + max-height: 192px; + max-width: 300px; + flex: 0 0 385px; + flex-grow: 0; + flex-shrink: 1; + } + + .showcase__inner { + flex-direction: column; + } + + .showcase__right { + display: none; + } + + .showcase__chevron { + display: flex; + } + + .showcase__offset>div { + pointer-events: none; + user-select: none; + } + + .showcase__offset>div:nth-child(even) { + display: flex; + } + + .showcase__offset>div:nth-child(odd) { + display: none; + } + + .showcase__1>div:nth-child(2), + .showcase__2>div:nth-child(4), + .showcase__3>div:nth-child(6), + .showcase__4>div:nth-child(8) { + font-size: var(--active-font-size); + opacity: 1; + } +} + +@media (max-width: 230px) { + .showcase :global(.prism-code.language-questdb-sql)>div { white-space: pre-line; padding: 1rem; } -} +} \ No newline at end of file diff --git a/src/data/whoIsUsing.js b/src/data/whoIsUsing.js index a3a6a19a..c38a88ea 100644 --- a/src/data/whoIsUsing.js +++ b/src/data/whoIsUsing.js @@ -1,7 +1,7 @@ const whoisUsing = [ { name: 'Ant Group', - img: 'img/logos/antgroup.jpeg', + img: 'img/logos/antgroup.png', href: 'https://www.antgroup.com/' }, { diff --git a/src/pages/index.js b/src/pages/index.js index b13a6232..6ba4579e 100644 --- a/src/pages/index.js +++ b/src/pages/index.js @@ -1,107 +1,136 @@ -import Link from '@docusaurus/Link' -import Translate from '@docusaurus/Translate' -import useBaseUrl from '@docusaurus/useBaseUrl' -import useDocusaurusContext from '@docusaurus/useDocusaurusContext' -import 
Layout from '@theme/Layout' -import clsx from 'clsx' -import React from 'react' -import { ExampleScroller } from "../components/ExampleScroller" +import Link from "@docusaurus/Link"; +import Translate from "@docusaurus/Translate"; +import useBaseUrl from "@docusaurus/useBaseUrl"; +import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; +import Layout from "@theme/Layout"; +import clsx from "clsx"; +import React from "react"; +import ThemedImage from "@theme/ThemedImage"; -import whoIsUsing from '../data/whoIsUsing' -import styles from './index.module.css' +import whoIsUsing from "../data/whoIsUsing"; +import styles from "./index.module.css"; +import { FcShipped, FcCollaboration, FcApproval } from "react-icons/fc"; - -function Feature({ imgUrl, title, description, reverse }) { +function Feature({ icon, title, description, reverse, outStyles }) { return ( -
-
- {title} -
-
+
+
{icon}
+
-

{title}

+

+ {title} +

{description}
- ) + ); } function Home() { - const { siteConfig } = useDocusaurusContext() + const { siteConfig } = useDocusaurusContext(); return ( - +
-
-
-
-

{siteConfig.title}

-

- {siteConfig.tagline} -

-
+
+
+

+ {siteConfig.title} +

+

{siteConfig.tagline}

-
- - - Getting started - - -
-
- - - Install - - -
+
+ + + Getting Started + + +
+
+ + + Install + + +
-
+ -
+

+

-
+
-

- All about your modern app by Platform as Code +

+ + All About Your Modern Apps by Platform Engineering +

-

- KusionStack codifies and unifies platform resources into stacked models and polices. -

-
+

+

+

+

-
+
Easy App Shipping} + icon={} + title={ + + Dynamic Configuration Management + + } description={ <>

- - Fast develop, easy deliver + + Effortlessly manage all application operations in a unified, streamlined manner.

-
    +
    • - - Codify and unifies spec, resources and manifests around modern app - + + Environment-Agnostic Application Configurations + +
    • - - Orchestrate and provision on Kubernetes and Clouds in a managed manner - + + Standardized and flexible platform configurations +
    • - Easy-to-access, Kubernetes-first, lightweight and dev-friendly + Kubernetes-first, lightweight and user-friendly
    @@ -109,30 +138,34 @@ function Home() { } /> Enterprise Declarative DevOps} + outStyles={{ marginLeft: 32 }} + icon={} + title={ + + Enable Developer Self-Service + + } description={ <>

    - - From the first code to production + + Meet customized needs with reusable building blocks.

    -
      +
      • - - Multi teams, projects go across multi phases to multi runtimes, clouds - + + A growing ecosystem of open modules integrated with various cloud-native infrastructures +
      • - - Production ready with scalability, performance and left-shifted stability - + + An efficient collaboration model for App Developers and Platform Engineers +
      • - - Various usages to meet requirements of diverse enterprise scenarios + + Paving the golden path for end-to-end DevOps lifecycle management
      @@ -141,30 +174,33 @@ function Home() { reverse={true} /> Enable Platform Engineering} + icon={} + title={ + + Built-in Security and Compliance + + } description={ <>

      - - Build proper abstraction and golden path + + Ensuring security from the first line of code to production runtime.

      -
        +
        • - - Schema-centric abstraction and constraints to build your models and validations - + + Codified shift-left validation to detect configuration risks early +
        • - - Easy to integrate in CI pipeline, service and product to build your paved road + + Extended check stages throughout the workload lifecycle
        • - - Fast develop on raw platform capabilities with consistency to cope with change + + Enterprise-grade fine-grained cluster control for Kubernetes
        @@ -174,25 +210,36 @@ function Home() {
-
- -
- -
- -
+

+

+

+

-
+
-

- Who is Using KusionStack +

+ Adopted by

{whoIsUsing.map((w) => ( -
- - {w.name} + ))} @@ -201,22 +248,58 @@ function Home() {
-
+

+

+

+

-
+
-

- KusionStack is in Cloud Native Computing Foundation landscape +

+ KusionStack is a {" "} + + Cloud Native Computing Foundation + {" "} + Sandbox Project

-
- +
+

+
+
+
+

+ Originally created by +

+ + + +
+
+
+

+

- ) + ); } -export default Home \ No newline at end of file +export default Home; diff --git a/src/pages/index.module.css b/src/pages/index.module.css index a10f90d4..d6415eb4 100644 --- a/src/pages/index.module.css +++ b/src/pages/index.module.css @@ -5,10 +5,7 @@ * and scoped locally. */ -.containerheroBanner { - padding-top: 6rem; - padding: 6rem 2rem; -} +@import url("https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700;800&display=swap"); .heroLogoWrapper { position: relative; @@ -31,13 +28,68 @@ } } -.heroLogo { +.banner { + padding: 10rem 0; width: 80%; - height: 80%; + margin: auto; + text-align: center; + position: relative; + overflow: hidden; +} + +.title { + font-family: "Poppins"; + font-weight: 800; + font-size: clamp(2rem, 5.5vw, 3.3rem); +} + +.subtitle { + font-family: "Poppins"; + font-weight: 600; + font-size: clamp(1.5rem, 4.5vw, 2.4rem); +} + +.description { + font-family: "Poppins"; + font-weight: 500; + width: 70%; + margin: auto; + margin-top: 20px; + margin-bottom: 30px; + color: var(--hero-description-text-color); + font-size: clamp(17px, 3vw, 1rem); } -.heroTitle { - padding-left: 1.5rem; +.button { + font-family: "Poppins"; + font-weight: 500; + padding: 12px 25px; + font-size: 18px; +} + +.poppinsFont, +.featureTitle, +.featureBody { + font-family: "Poppins"; + font-weight: 600; +} + +@media screen and (max-width: 966px) { + .banner { + width: 100%; + padding: 2rem 1rem; + } + + .description { + width: 100%; + } +} + +@media screen and (max-width: 405px) { + .button { + width: 100%; + margin-bottom: 10px; + } } .feature { @@ -69,11 +121,6 @@ } } -.featureContent { - display: flex !important; - align-items: center; -} - .whiteboard { margin-top: 3rem; } @@ -129,6 +176,10 @@ height: 96px; } +[data-theme='dark'] .cncfLogo { + height: 75px; +} + @media screen and (max-width: 768px) { .cncfLogo { height: 64px; diff --git a/src/theme/DocItem/Footer/index.js b/src/theme/DocItem/Footer/index.js new file mode 100644 index 00000000..668fd5e4 --- /dev/null +++ b/src/theme/DocItem/Footer/index.js @@ -0,0 +1,84 @@ +import React from "react"; +import clsx from "clsx"; +import { ThemeClassNames } from "@docusaurus/theme-common"; +import { useDoc } from "@docusaurus/theme-common/internal"; +import LastUpdated from "@theme/LastUpdated"; +import EditThisPage from "@theme/EditThisPage"; +import TagsListInline from "@theme/TagsListInline"; + +import styles from "./styles.module.css"; +import DocsRating from "../../../components/DocsRating"; + +function TagsRow(props) { + return ( +
+
+ +
+
+ ); +} + +function EditMetaRow({ + editUrl, + lastUpdatedAt, + lastUpdatedBy, + formattedLastUpdatedAt, +}) { + return ( +
+
{editUrl && }
+ +
+ {(lastUpdatedAt || lastUpdatedBy) && ( + + )} +
+
+ ); +} + +export default function DocItemFooter() { + const { metadata } = useDoc(); + const { + editUrl, + lastUpdatedAt, + formattedLastUpdatedAt, + lastUpdatedBy, + tags, + } = metadata; + const canDisplayTagsRow = tags.length > 0; + const canDisplayEditMetaRow = !!(editUrl || lastUpdatedAt || lastUpdatedBy); + const canDisplayFooter = canDisplayTagsRow || canDisplayEditMetaRow; + if (!canDisplayFooter) { + return null; + } + + return ( + <> + +
+ {canDisplayTagsRow && } + {canDisplayEditMetaRow && ( + + )} +
+ + ); +} diff --git a/src/theme/DocItem/Footer/styles.module.css b/src/theme/DocItem/Footer/styles.module.css new file mode 100644 index 00000000..7c1e9644 --- /dev/null +++ b/src/theme/DocItem/Footer/styles.module.css @@ -0,0 +1,11 @@ +.lastUpdated { + margin-top: 0.2rem; + font-style: italic; + font-size: smaller; +} + +@media (min-width: 997px) { + .lastUpdated { + text-align: right; + } +} diff --git a/src/theme/NavbarItem.js b/src/theme/NavbarItem.js new file mode 100644 index 00000000..e708f13c --- /dev/null +++ b/src/theme/NavbarItem.js @@ -0,0 +1,26 @@ +import React from "react"; +import OriginalNavBarItem from "@theme-original/NavbarItem"; +import { useLocation } from "@docusaurus/router"; + +export default function NavbarItem(props) { + const { docsPluginId, type } = props; + const { pathname } = useLocation(); + + if ( + type === "docsVersionDropdown" && + pathname.search(new RegExp(`^/${docsPluginId}/`, "g")) === -1 && + !pathname?.includes(docsPluginId) + ) { + return <>; + } + + if (type === "localeDropdown" && !pathname?.includes("karpor")) { + return <>; + } + + return ( + <> + + + ); +} diff --git a/static/CNAME b/static/CNAME deleted file mode 100644 index e9f237b1..00000000 --- a/static/CNAME +++ /dev/null @@ -1 +0,0 @@ -kusionstack.io diff --git a/static/component-base/index.html b/static/component-base/index.html new file mode 100644 index 00000000..0ab240f5 --- /dev/null +++ b/static/component-base/index.html @@ -0,0 +1,20 @@ + + + + KusionStack/component-base + + + + + + +
+

KusionStack/component-base

+

hello https://github.com/KusionStack/component-base

+
diff --git a/static/fonts/Segoe-UI.ttf b/static/fonts/Segoe-UI.ttf new file mode 100644 index 00000000..46b3b993 Binary files /dev/null and b/static/fonts/Segoe-UI.ttf differ diff --git a/static/helm-kcl/index.html b/static/helm-kcl/index.html new file mode 100644 index 00000000..19ae7f13 --- /dev/null +++ b/static/helm-kcl/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/helm-kcl + + + + + + +
+

KusionStack/helm-kcl

+

hello https://github.com/KusionStack/helm-kcl

+
diff --git a/static/img/blog/2022-12-12-post-cloud-native-era-operation/circle.png b/static/img/blog/2022-12-12-post-cloud-native-era-operation/circle.png new file mode 100644 index 00000000..77be06ed Binary files /dev/null and b/static/img/blog/2022-12-12-post-cloud-native-era-operation/circle.png differ diff --git a/static/img/blog/2022-12-12-post-cloud-native-era-operation/classic.png b/static/img/blog/2022-12-12-post-cloud-native-era-operation/classic.png new file mode 100644 index 00000000..532bddd1 Binary files /dev/null and b/static/img/blog/2022-12-12-post-cloud-native-era-operation/classic.png differ diff --git a/static/img/blog/2022-12-12-post-cloud-native-era-operation/modern-app.png b/static/img/blog/2022-12-12-post-cloud-native-era-operation/modern-app.png new file mode 100644 index 00000000..00d85ba4 Binary files /dev/null and b/static/img/blog/2022-12-12-post-cloud-native-era-operation/modern-app.png differ diff --git a/static/img/cncf-color.png b/static/img/cncf-color.png new file mode 100644 index 00000000..a1fa79e9 Binary files /dev/null and b/static/img/cncf-color.png differ diff --git a/static/img/cncf-logo.png b/static/img/cncf-logo.png deleted file mode 100644 index f1199211..00000000 Binary files a/static/img/cncf-logo.png and /dev/null differ diff --git a/static/img/cncf-white.png b/static/img/cncf-white.png new file mode 100644 index 00000000..9c3fd076 Binary files /dev/null and b/static/img/cncf-white.png differ diff --git a/static/img/docs/concept/appconfig.png b/static/img/docs/concept/appconfig.png new file mode 100644 index 00000000..d7a64067 Binary files /dev/null and b/static/img/docs/concept/appconfig.png differ diff --git a/static/img/docs/concept/kusion-module.png b/static/img/docs/concept/kusion-module.png new file mode 100644 index 00000000..c2449689 Binary files /dev/null and b/static/img/docs/concept/kusion-module.png differ diff --git a/static/img/docs/concept/kusion_engine.png b/static/img/docs/concept/kusion_engine.png new file mode 100644 index 00000000..8e09ae77 Binary files /dev/null and b/static/img/docs/concept/kusion_engine.png differ diff --git a/static/img/docs/concept/kusion_workflow.png b/static/img/docs/concept/kusion_workflow.png new file mode 100644 index 00000000..01b9d889 Binary files /dev/null and b/static/img/docs/concept/kusion_workflow.png differ diff --git a/static/img/docs/concept/project-stack-workspace.png b/static/img/docs/concept/project-stack-workspace.png new file mode 100644 index 00000000..234f732a Binary files /dev/null and b/static/img/docs/concept/project-stack-workspace.png differ diff --git a/static/img/docs/concept/workspace-project-stack.png b/static/img/docs/concept/workspace-project-stack.png new file mode 100644 index 00000000..4230c870 Binary files /dev/null and b/static/img/docs/concept/workspace-project-stack.png differ diff --git a/static/img/docs/user_docs/cloud-resources/expose-service/apply-private.png b/static/img/docs/user_docs/cloud-resources/expose-service/apply-private.png new file mode 100644 index 00000000..09e4a6c0 Binary files /dev/null and b/static/img/docs/user_docs/cloud-resources/expose-service/apply-private.png differ diff --git a/static/img/docs/user_docs/cloud-resources/expose-service/apply-public.png b/static/img/docs/user_docs/cloud-resources/expose-service/apply-public.png new file mode 100644 index 00000000..4a589423 Binary files /dev/null and b/static/img/docs/user_docs/cloud-resources/expose-service/apply-public.png differ diff --git 
a/static/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png b/static/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png new file mode 100644 index 00000000..39145535 Binary files /dev/null and b/static/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-private.png differ diff --git a/static/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png b/static/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png new file mode 100644 index 00000000..8344b064 Binary files /dev/null and b/static/img/docs/user_docs/cloud-resources/expose-service/k8s-resource-public.png differ diff --git a/static/img/docs/user_docs/cloud-resources/expose-service/preview-public.png b/static/img/docs/user_docs/cloud-resources/expose-service/preview-public.png new file mode 100644 index 00000000..62ffba37 Binary files /dev/null and b/static/img/docs/user_docs/cloud-resources/expose-service/preview-public.png differ diff --git a/static/img/docs/user_docs/cloud-resources/expose-service/result-public.png b/static/img/docs/user_docs/cloud-resources/expose-service/result-public.png new file mode 100644 index 00000000..82d46d30 Binary files /dev/null and b/static/img/docs/user_docs/cloud-resources/expose-service/result-public.png differ diff --git a/static/img/docs/user_docs/concepts/high-level-schema.png b/static/img/docs/user_docs/concepts/high-level-schema.png new file mode 100644 index 00000000..ab48dbee Binary files /dev/null and b/static/img/docs/user_docs/concepts/high-level-schema.png differ diff --git a/static/img/docs/user_docs/getting-started/alicloud-rds-instance.png b/static/img/docs/user_docs/getting-started/alicloud-rds-instance.png new file mode 100644 index 00000000..981065fd Binary files /dev/null and b/static/img/docs/user_docs/getting-started/alicloud-rds-instance.png differ diff --git a/static/img/docs/user_docs/getting-started/apply-wordpress-application-with-aws-rds.png b/static/img/docs/user_docs/getting-started/apply-wordpress-application-with-aws-rds.png new file mode 100644 index 00000000..b355a3cf Binary files /dev/null and b/static/img/docs/user_docs/getting-started/apply-wordpress-application-with-aws-rds.png differ diff --git a/static/img/docs/user_docs/getting-started/apply-wordpress-application.png b/static/img/docs/user_docs/getting-started/apply-wordpress-application.png new file mode 100644 index 00000000..9e5ac32f Binary files /dev/null and b/static/img/docs/user_docs/getting-started/apply-wordpress-application.png differ diff --git a/static/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png b/static/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png new file mode 100644 index 00000000..07a98129 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-alicloud.png differ diff --git a/static/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png b/static/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png new file mode 100644 index 00000000..07833533 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/apply-wordpress-cloud-rds-aws.png differ diff --git a/static/img/docs/user_docs/getting-started/apply-wordpress-local-db.gif b/static/img/docs/user_docs/getting-started/apply-wordpress-local-db.gif new file mode 100644 index 00000000..da102673 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/apply-wordpress-local-db.gif differ diff --git 
a/static/img/docs/user_docs/getting-started/apply-wordpress-with-local-db.gif b/static/img/docs/user_docs/getting-started/apply-wordpress-with-local-db.gif new file mode 100644 index 00000000..93fade06 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/apply-wordpress-with-local-db.gif differ diff --git a/static/img/docs/user_docs/getting-started/apply.gif b/static/img/docs/user_docs/getting-started/apply.gif index 0053e26d..885d2504 100644 Binary files a/static/img/docs/user_docs/getting-started/apply.gif and b/static/img/docs/user_docs/getting-started/apply.gif differ diff --git a/static/img/docs/user_docs/getting-started/assert-rds-instance-type.png b/static/img/docs/user_docs/getting-started/assert-rds-instance-type.png new file mode 100644 index 00000000..a66b9e02 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/assert-rds-instance-type.png differ diff --git a/static/img/docs/user_docs/getting-started/aws-iam-account.png b/static/img/docs/user_docs/getting-started/aws-iam-account.png new file mode 100644 index 00000000..e4dd9f17 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/aws-iam-account.png differ diff --git a/static/img/docs/user_docs/getting-started/aws-rds-instance-detail.png b/static/img/docs/user_docs/getting-started/aws-rds-instance-detail.png new file mode 100644 index 00000000..0f298b1f Binary files /dev/null and b/static/img/docs/user_docs/getting-started/aws-rds-instance-detail.png differ diff --git a/static/img/docs/user_docs/getting-started/aws-rds-instance.png b/static/img/docs/user_docs/getting-started/aws-rds-instance.png new file mode 100644 index 00000000..d6582e8b Binary files /dev/null and b/static/img/docs/user_docs/getting-started/aws-rds-instance.png differ diff --git a/static/img/docs/user_docs/getting-started/choose-template.gif b/static/img/docs/user_docs/getting-started/choose-template.gif index 3eeba95f..45dbfdd5 100644 Binary files a/static/img/docs/user_docs/getting-started/choose-template.gif and b/static/img/docs/user_docs/getting-started/choose-template.gif differ diff --git a/static/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png b/static/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png new file mode 100644 index 00000000..659ca160 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/cloud-rds-instance-alicloud.png differ diff --git a/static/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png b/static/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png new file mode 100644 index 00000000..7aaf19f1 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/cloud-rds-instance-aws.png differ diff --git a/static/img/docs/user_docs/getting-started/config-diff-apply.gif b/static/img/docs/user_docs/getting-started/config-diff-apply.gif new file mode 100644 index 00000000..39382ccd Binary files /dev/null and b/static/img/docs/user_docs/getting-started/config-diff-apply.gif differ diff --git a/static/img/docs/user_docs/getting-started/create-project.gif b/static/img/docs/user_docs/getting-started/create-project.gif new file mode 100644 index 00000000..b34381ca Binary files /dev/null and b/static/img/docs/user_docs/getting-started/create-project.gif differ diff --git a/static/img/docs/user_docs/getting-started/data-preview.gif b/static/img/docs/user_docs/getting-started/data-preview.gif new file mode 100644 index 00000000..d5d00513 Binary files /dev/null and 
b/static/img/docs/user_docs/getting-started/data-preview.gif differ diff --git a/static/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png b/static/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png new file mode 100644 index 00000000..fdf656cd Binary files /dev/null and b/static/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-alicloud.png differ diff --git a/static/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png b/static/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png new file mode 100644 index 00000000..9a1115f5 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/destroy-wordpress-cloud-rds-aws.png differ diff --git a/static/img/docs/user_docs/getting-started/destroy-wordpress-local-db.gif b/static/img/docs/user_docs/getting-started/destroy-wordpress-local-db.gif new file mode 100644 index 00000000..0412ecf9 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/destroy-wordpress-local-db.gif differ diff --git a/static/img/docs/user_docs/getting-started/init-gocity.gif b/static/img/docs/user_docs/getting-started/init-gocity.gif new file mode 100644 index 00000000..fe5fbedf Binary files /dev/null and b/static/img/docs/user_docs/getting-started/init-gocity.gif differ diff --git a/static/img/docs/user_docs/getting-started/init-wordpress-cloud-rds.gif b/static/img/docs/user_docs/getting-started/init-wordpress-cloud-rds.gif new file mode 100644 index 00000000..d160c5f8 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/init-wordpress-cloud-rds.gif differ diff --git a/static/img/docs/user_docs/getting-started/init-wordpress-local-db.gif b/static/img/docs/user_docs/getting-started/init-wordpress-local-db.gif new file mode 100644 index 00000000..769e7eff Binary files /dev/null and b/static/img/docs/user_docs/getting-started/init-wordpress-local-db.gif differ diff --git a/static/img/docs/user_docs/getting-started/init-wordpress-with-local-db.gif b/static/img/docs/user_docs/getting-started/init-wordpress-with-local-db.gif new file mode 100644 index 00000000..90fa24d0 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/init-wordpress-with-local-db.gif differ diff --git a/static/img/docs/user_docs/getting-started/init-wordpress-with-rds.gif b/static/img/docs/user_docs/getting-started/init-wordpress-with-rds.gif new file mode 100644 index 00000000..876392a6 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/init-wordpress-with-rds.gif differ diff --git a/static/img/docs/user_docs/getting-started/init-wordpress.gif b/static/img/docs/user_docs/getting-started/init-wordpress.gif new file mode 100644 index 00000000..2eed781e Binary files /dev/null and b/static/img/docs/user_docs/getting-started/init-wordpress.gif differ diff --git a/static/img/docs/user_docs/getting-started/install/codespaces/apply.gif b/static/img/docs/user_docs/getting-started/install/codespaces/apply.gif new file mode 100644 index 00000000..5cc8877d Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/apply.gif differ diff --git a/static/img/docs/user_docs/getting-started/install/codespaces/confirm-codespace.png b/static/img/docs/user_docs/getting-started/install/codespaces/confirm-codespace.png new file mode 100644 index 00000000..31725ee1 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/confirm-codespace.png differ diff --git 
a/static/img/docs/user_docs/getting-started/install/codespaces/create-codespace.gif b/static/img/docs/user_docs/getting-started/install/codespaces/create-codespace.gif new file mode 100644 index 00000000..abc8abfc Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/create-codespace.gif differ diff --git a/static/img/docs/user_docs/getting-started/install/codespaces/create-codespace.png b/static/img/docs/user_docs/getting-started/install/codespaces/create-codespace.png new file mode 100644 index 00000000..6cabda85 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/create-codespace.png differ diff --git a/static/img/docs/user_docs/getting-started/install/codespaces/delete-codespace.gif b/static/img/docs/user_docs/getting-started/install/codespaces/delete-codespace.gif new file mode 100644 index 00000000..a09bee61 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/delete-codespace.gif differ diff --git a/static/img/docs/user_docs/getting-started/install/codespaces/minikube-start.gif b/static/img/docs/user_docs/getting-started/install/codespaces/minikube-start.gif new file mode 100644 index 00000000..43721dcb Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/minikube-start.gif differ diff --git a/static/img/docs/user_docs/getting-started/install/codespaces/view-code.gif b/static/img/docs/user_docs/getting-started/install/codespaces/view-code.gif new file mode 100644 index 00000000..d3ffab0f Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/view-code.gif differ diff --git a/static/img/docs/user_docs/getting-started/install/codespaces/view-code.jpg b/static/img/docs/user_docs/getting-started/install/codespaces/view-code.jpg new file mode 100644 index 00000000..07561290 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/install/codespaces/view-code.jpg differ diff --git a/static/img/docs/user_docs/getting-started/kcl-assertion-failure.png b/static/img/docs/user_docs/getting-started/kcl-assertion-failure.png new file mode 100644 index 00000000..123a8126 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kcl-assertion-failure.png differ diff --git a/static/img/docs/user_docs/getting-started/kusion-3-way-diff.png b/static/img/docs/user_docs/getting-started/kusion-3-way-diff.png new file mode 100644 index 00000000..61d3b5ec Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion-3-way-diff.png differ diff --git a/static/img/docs/user_docs/getting-started/kusion-destroy-wordpress-with-aws-rds.png b/static/img/docs/user_docs/getting-started/kusion-destroy-wordpress-with-aws-rds.png new file mode 100644 index 00000000..86abcdc5 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion-destroy-wordpress-with-aws-rds.png differ diff --git a/static/img/docs/user_docs/getting-started/kusion-destroy-wordpress.png b/static/img/docs/user_docs/getting-started/kusion-destroy-wordpress.png new file mode 100644 index 00000000..654aa93a Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion-destroy-wordpress.png differ diff --git a/static/img/docs/user_docs/getting-started/kusion_apply_quickstart.gif b/static/img/docs/user_docs/getting-started/kusion_apply_quickstart.gif new file mode 100644 index 00000000..1814b8c5 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion_apply_quickstart.gif differ 
diff --git a/static/img/docs/user_docs/getting-started/kusion_apply_quickstart_0.12.gif b/static/img/docs/user_docs/getting-started/kusion_apply_quickstart_0.12.gif new file mode 100644 index 00000000..f5054006 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion_apply_quickstart_0.12.gif differ diff --git a/static/img/docs/user_docs/getting-started/kusion_destroy_quickstart.gif b/static/img/docs/user_docs/getting-started/kusion_destroy_quickstart.gif new file mode 100644 index 00000000..b2486999 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion_destroy_quickstart.gif differ diff --git a/static/img/docs/user_docs/getting-started/kusion_re_apply_quickstart.gif b/static/img/docs/user_docs/getting-started/kusion_re_apply_quickstart.gif new file mode 100644 index 00000000..fb09e23d Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion_re_apply_quickstart.gif differ diff --git a/static/img/docs/user_docs/getting-started/kusion_re_apply_quickstart_0.12.gif b/static/img/docs/user_docs/getting-started/kusion_re_apply_quickstart_0.12.gif new file mode 100644 index 00000000..736be9e4 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/kusion_re_apply_quickstart_0.12.gif differ diff --git a/static/img/docs/user_docs/getting-started/quickstart_mysql_validation.gif b/static/img/docs/user_docs/getting-started/quickstart_mysql_validation.gif new file mode 100644 index 00000000..2cf7c601 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/quickstart_mysql_validation.gif differ diff --git a/static/img/docs/user_docs/getting-started/quickstart_page.png b/static/img/docs/user_docs/getting-started/quickstart_page.png new file mode 100644 index 00000000..3a255928 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/quickstart_page.png differ diff --git a/static/img/docs/user_docs/getting-started/quickstart_page_with_mysql.png b/static/img/docs/user_docs/getting-started/quickstart_page_with_mysql.png new file mode 100644 index 00000000..1aa5e451 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/quickstart_page_with_mysql.png differ diff --git a/static/img/docs/user_docs/getting-started/set-oss-access.png b/static/img/docs/user_docs/getting-started/set-oss-access.png new file mode 100644 index 00000000..18929457 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/set-oss-access.png differ diff --git a/static/img/docs/user_docs/getting-started/set-rds-access.png b/static/img/docs/user_docs/getting-started/set-rds-access.png new file mode 100644 index 00000000..da1a4402 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/set-rds-access.png differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-apply.gif b/static/img/docs/user_docs/getting-started/wordpress-apply.gif new file mode 100644 index 00000000..153c0786 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-apply.gif differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png b/static/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png new file mode 100644 index 00000000..50de781e Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-cloud-rds-port-forward.png differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-port-forward.png b/static/img/docs/user_docs/getting-started/wordpress-port-forward.png new file mode 100644 
index 00000000..cd574064 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-port-forward.png differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-setup.gif b/static/img/docs/user_docs/getting-started/wordpress-setup.gif new file mode 100644 index 00000000..5ada9c75 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-setup.gif differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-site-page.png b/static/img/docs/user_docs/getting-started/wordpress-site-page.png new file mode 100644 index 00000000..3ec2d1cf Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-site-page.png differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-video-cover.png b/static/img/docs/user_docs/getting-started/wordpress-video-cover.png new file mode 100644 index 00000000..a2db8e59 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-video-cover.png differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-with-local-db-destroy.gif b/static/img/docs/user_docs/getting-started/wordpress-with-local-db-destroy.gif new file mode 100644 index 00000000..38b58749 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-with-local-db-destroy.gif differ diff --git a/static/img/docs/user_docs/getting-started/wordpress-with-local-db-diff.gif b/static/img/docs/user_docs/getting-started/wordpress-with-local-db-diff.gif new file mode 100644 index 00000000..c8d1afd2 Binary files /dev/null and b/static/img/docs/user_docs/getting-started/wordpress-with-local-db-diff.gif differ diff --git a/static/img/docs/user_docs/guides/argocd/out-of-sync.jpg b/static/img/docs/user_docs/guides/argocd/out-of-sync.jpg deleted file mode 100644 index 62924b5e..00000000 Binary files a/static/img/docs/user_docs/guides/argocd/out-of-sync.jpg and /dev/null differ diff --git a/static/img/docs/user_docs/guides/argocd/reconcile-drifted-config.jpg b/static/img/docs/user_docs/guides/argocd/reconcile-drifted-config.jpg deleted file mode 100644 index 8ca04d33..00000000 Binary files a/static/img/docs/user_docs/guides/argocd/reconcile-drifted-config.jpg and /dev/null differ diff --git a/static/img/docs/user_docs/guides/argocd/synced.jpg b/static/img/docs/user_docs/guides/argocd/synced.jpg deleted file mode 100644 index 8ffdebd2..00000000 Binary files a/static/img/docs/user_docs/guides/argocd/synced.jpg and /dev/null differ diff --git a/static/img/docs/user_docs/guides/github-actions/alicloud_oss_storage_backend.png b/static/img/docs/user_docs/guides/github-actions/alicloud_oss_storage_backend.png new file mode 100644 index 00000000..d77b7375 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/alicloud_oss_storage_backend.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/apply.png b/static/img/docs/user_docs/guides/github-actions/apply.png new file mode 100644 index 00000000..577a13a2 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/apply.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/changed-project-stack.png b/static/img/docs/user_docs/guides/github-actions/changed-project-stack.png new file mode 100644 index 00000000..4369a692 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/changed-project-stack.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/check-structure.png 
b/static/img/docs/user_docs/guides/github-actions/check-structure.png new file mode 100644 index 00000000..d13a227d Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/check-structure.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/github_actions_apply.png b/static/img/docs/user_docs/guides/github-actions/github_actions_apply.png new file mode 100644 index 00000000..826599d5 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/github_actions_apply.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/github_actions_apply_details.png b/static/img/docs/user_docs/guides/github-actions/github_actions_apply_details.png new file mode 100644 index 00000000..4cd5ba35 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/github_actions_apply_details.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/github_actions_get_changed_projects_stacks.png b/static/img/docs/user_docs/guides/github-actions/github_actions_get_changed_projects_stacks.png new file mode 100644 index 00000000..95619f4b Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/github_actions_get_changed_projects_stacks.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/github_actions_mannual_approval.png b/static/img/docs/user_docs/guides/github-actions/github_actions_mannual_approval.png new file mode 100644 index 00000000..7f0d534a Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/github_actions_mannual_approval.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/github_actions_preview.png b/static/img/docs/user_docs/guides/github-actions/github_actions_preview.png new file mode 100644 index 00000000..d6f25357 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/github_actions_preview.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/github_actions_preview_details.png b/static/img/docs/user_docs/guides/github-actions/github_actions_preview_details.png new file mode 100644 index 00000000..884802da Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/github_actions_preview_details.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/kusion-changed-project-stack.png b/static/img/docs/user_docs/guides/github-actions/kusion-changed-project-stack.png new file mode 100644 index 00000000..601df98e Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/kusion-changed-project-stack.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/preview.png b/static/img/docs/user_docs/guides/github-actions/preview.png new file mode 100644 index 00000000..af6ede28 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/preview.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/test-correctness.png b/static/img/docs/user_docs/guides/github-actions/test-correctness.png new file mode 100644 index 00000000..a2fb1cdc Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/test-correctness.png differ diff --git a/static/img/docs/user_docs/guides/github-actions/upload_modules.png b/static/img/docs/user_docs/guides/github-actions/upload_modules.png new file mode 100644 index 00000000..fd7f77d3 Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/upload_modules.png differ diff --git 
a/static/img/docs/user_docs/guides/github-actions/workflow.png b/static/img/docs/user_docs/guides/github-actions/workflow.png new file mode 100644 index 00000000..a476e7fe Binary files /dev/null and b/static/img/docs/user_docs/guides/github-actions/workflow.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/3_way_diff_example.png b/static/img/docs/user_docs/guides/kubevela/3_way_diff_example.png new file mode 100644 index 00000000..df5e56b5 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/3_way_diff_example.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/assert.png b/static/img/docs/user_docs/guides/kubevela/assert.png new file mode 100644 index 00000000..5c90fe4e Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/assert.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/aws_iam_account.png b/static/img/docs/user_docs/guides/kubevela/aws_iam_account.png new file mode 100644 index 00000000..4e4fe40b Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/aws_iam_account.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/developer_and_platform_collab_example.png b/static/img/docs/user_docs/guides/kubevela/developer_and_platform_collab_example.png new file mode 100644 index 00000000..0328ca10 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/developer_and_platform_collab_example.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/integration_solution.png b/static/img/docs/user_docs/guides/kubevela/integration_solution.png new file mode 100644 index 00000000..9a045bb6 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/integration_solution.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/kubevela_application_example.png b/static/img/docs/user_docs/guides/kubevela/kubevela_application_example.png new file mode 100644 index 00000000..a31cadf4 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/kubevela_application_example.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/kubevela_usage_example.png b/static/img/docs/user_docs/guides/kubevela/kubevela_usage_example.png new file mode 100644 index 00000000..2b630048 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/kubevela_usage_example.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/kusion_apply.png b/static/img/docs/user_docs/guides/kubevela/kusion_apply.png new file mode 100644 index 00000000..79589cdd Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/kusion_apply.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/kusion_apply_diff.png b/static/img/docs/user_docs/guides/kubevela/kusion_apply_diff.png new file mode 100644 index 00000000..df5e56b5 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/kusion_apply_diff.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/kusion_destroy.png b/static/img/docs/user_docs/guides/kubevela/kusion_destroy.png new file mode 100644 index 00000000..a46b7588 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/kusion_destroy.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/kusionstack_workflow_example.png b/static/img/docs/user_docs/guides/kubevela/kusionstack_workflow_example.png new file mode 100644 index 00000000..839fb41a Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/kusionstack_workflow_example.png differ diff --git 
a/static/img/docs/user_docs/guides/kubevela/port_forward.png b/static/img/docs/user_docs/guides/kubevela/port_forward.png new file mode 100644 index 00000000..5aa33e2d Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/port_forward.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/rds_detailed.png b/static/img/docs/user_docs/guides/kubevela/rds_detailed.png new file mode 100644 index 00000000..38fa9b88 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/rds_detailed.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/rds_info.png b/static/img/docs/user_docs/guides/kubevela/rds_info.png new file mode 100644 index 00000000..e9c3e4ff Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/rds_info.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/shift_left_security_example.png b/static/img/docs/user_docs/guides/kubevela/shift_left_security_example.png new file mode 100644 index 00000000..5c90fe4e Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/shift_left_security_example.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/unified_description_in_kcl.png b/static/img/docs/user_docs/guides/kubevela/unified_description_in_kcl.png new file mode 100644 index 00000000..18b3a6c9 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/unified_description_in_kcl.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/vela_top.png b/static/img/docs/user_docs/guides/kubevela/vela_top.png new file mode 100644 index 00000000..4eac0920 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/vela_top.png differ diff --git a/static/img/docs/user_docs/guides/kubevela/velaux.png b/static/img/docs/user_docs/guides/kubevela/velaux.png new file mode 100644 index 00000000..14f575a1 Binary files /dev/null and b/static/img/docs/user_docs/guides/kubevela/velaux.png differ diff --git a/static/img/docs/user_docs/guides/llm-ops/inference-test-1.png b/static/img/docs/user_docs/guides/llm-ops/inference-test-1.png new file mode 100644 index 00000000..7b91183e Binary files /dev/null and b/static/img/docs/user_docs/guides/llm-ops/inference-test-1.png differ diff --git a/static/img/docs/user_docs/guides/llm-ops/inference-test-2.png b/static/img/docs/user_docs/guides/llm-ops/inference-test-2.png new file mode 100644 index 00000000..24d450c1 Binary files /dev/null and b/static/img/docs/user_docs/guides/llm-ops/inference-test-2.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/kusion-apply-success.png b/static/img/docs/user_docs/guides/prometheus/kusion-apply-success.png new file mode 100644 index 00000000..0538c602 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/kusion-apply-success.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png b/static/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png new file mode 100644 index 00000000..5c43a828 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/kusion-apply-with-monitor.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/prometheus-operated.png b/static/img/docs/user_docs/guides/prometheus/prometheus-operated.png new file mode 100644 index 00000000..9be67fe3 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/prometheus-operated.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/prometheus-portal.png 
b/static/img/docs/user_docs/guides/prometheus/prometheus-portal.png new file mode 100644 index 00000000..2db89dd9 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/prometheus-portal.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/prometheus-service-discovery.png b/static/img/docs/user_docs/guides/prometheus/prometheus-service-discovery.png new file mode 100644 index 00000000..b1f40ab4 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/prometheus-service-discovery.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png b/static/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png new file mode 100644 index 00000000..62784b30 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/prometheus-simple-query.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/prometheus-targets.png b/static/img/docs/user_docs/guides/prometheus/prometheus-targets.png new file mode 100644 index 00000000..e1417421 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/prometheus-targets.png differ diff --git a/static/img/docs/user_docs/guides/prometheus/service-monitor.png b/static/img/docs/user_docs/guides/prometheus/service-monitor.png new file mode 100644 index 00000000..adc44706 Binary files /dev/null and b/static/img/docs/user_docs/guides/prometheus/service-monitor.png differ diff --git a/static/img/docs/user_docs/guides/secret-as-code/apply.jpg b/static/img/docs/user_docs/guides/secret-as-code/apply.jpg new file mode 100644 index 00000000..20627744 Binary files /dev/null and b/static/img/docs/user_docs/guides/secret-as-code/apply.jpg differ diff --git a/static/img/docs/user_docs/guides/working-with-k8s/app-preview.jpg b/static/img/docs/user_docs/guides/working-with-k8s/app-preview.png similarity index 100% rename from static/img/docs/user_docs/guides/working-with-k8s/app-preview.jpg rename to static/img/docs/user_docs/guides/working-with-k8s/app-preview.png diff --git a/static/img/docs/user_docs/intro/engine-arch.png b/static/img/docs/user_docs/intro/engine-arch.png new file mode 100644 index 00000000..8bb70940 Binary files /dev/null and b/static/img/docs/user_docs/intro/engine-arch.png differ diff --git a/static/img/docs/user_docs/intro/kusion-stack-0.png b/static/img/docs/user_docs/intro/kusion-stack-0.png deleted file mode 100644 index 97f764c5..00000000 Binary files a/static/img/docs/user_docs/intro/kusion-stack-0.png and /dev/null differ diff --git a/static/img/docs/user_docs/intro/kusion-stack-1.png b/static/img/docs/user_docs/intro/kusion-stack-1.png index 8c3be512..6b08f4df 100644 Binary files a/static/img/docs/user_docs/intro/kusion-stack-1.png and b/static/img/docs/user_docs/intro/kusion-stack-1.png differ diff --git a/static/img/docs/user_docs/intro/kusion.png b/static/img/docs/user_docs/intro/kusion.png new file mode 100644 index 00000000..2d0d96f5 Binary files /dev/null and b/static/img/docs/user_docs/intro/kusion.png differ diff --git a/static/img/features/easy-white.png b/static/img/features/easy-white.png new file mode 100644 index 00000000..ea015862 Binary files /dev/null and b/static/img/features/easy-white.png differ diff --git a/static/img/features/easy.jpeg b/static/img/features/easy.jpeg deleted file mode 100644 index 8a860e9d..00000000 Binary files a/static/img/features/easy.jpeg and /dev/null differ diff --git a/static/img/features/easy.png b/static/img/features/easy.png new file mode 100644 index 00000000..f4aec73b 
Binary files /dev/null and b/static/img/features/easy.png differ diff --git a/static/img/features/enable-white.png b/static/img/features/enable-white.png new file mode 100644 index 00000000..f6e4a6cb Binary files /dev/null and b/static/img/features/enable-white.png differ diff --git a/static/img/features/enable.jpeg b/static/img/features/enable.jpeg deleted file mode 100644 index e1983657..00000000 Binary files a/static/img/features/enable.jpeg and /dev/null differ diff --git a/static/img/features/enable.png b/static/img/features/enable.png new file mode 100644 index 00000000..e89a7707 Binary files /dev/null and b/static/img/features/enable.png differ diff --git a/static/img/features/teams-white.png b/static/img/features/teams-white.png new file mode 100644 index 00000000..8849e124 Binary files /dev/null and b/static/img/features/teams-white.png differ diff --git a/static/img/features/teams.png b/static/img/features/teams.png index 807975f9..23ee02ca 100644 Binary files a/static/img/features/teams.png and b/static/img/features/teams.png differ diff --git a/static/img/kcl-logo.png b/static/img/kcl-logo.png deleted file mode 100644 index 7d5902de..00000000 Binary files a/static/img/kcl-logo.png and /dev/null differ diff --git a/static/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png b/static/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png new file mode 100644 index 00000000..2bd55092 Binary files /dev/null and b/static/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle-sequence-diagram.png differ diff --git a/static/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png b/static/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png new file mode 100644 index 00000000..3dd38a98 Binary files /dev/null and b/static/img/kuperator/concepts/podopslifecycle/pod-ops-lifecycle.png differ diff --git a/static/img/kuperator/manuals/collaset/operation-delay-seconds.png b/static/img/kuperator/manuals/collaset/operation-delay-seconds.png new file mode 100644 index 00000000..e4d9cbbb Binary files /dev/null and b/static/img/kuperator/manuals/collaset/operation-delay-seconds.png differ diff --git a/static/img/kuperator/manuals/operationjob/operationjob-frame.png b/static/img/kuperator/manuals/operationjob/operationjob-frame.png new file mode 100644 index 00000000..94cdf276 Binary files /dev/null and b/static/img/kuperator/manuals/operationjob/operationjob-frame.png differ diff --git a/static/img/kusionstack-icon-square.png b/static/img/kusionstack-icon-square.png new file mode 100644 index 00000000..223f8349 Binary files /dev/null and b/static/img/kusionstack-icon-square.png differ diff --git a/static/img/kusionstack-icon-white.png b/static/img/kusionstack-icon-white.png new file mode 100644 index 00000000..392fdaf3 Binary files /dev/null and b/static/img/kusionstack-icon-white.png differ diff --git a/static/img/kusionstack-icon.png b/static/img/kusionstack-icon.png new file mode 100644 index 00000000..aa72a71e Binary files /dev/null and b/static/img/kusionstack-icon.png differ diff --git a/static/img/logos/antgroup.png b/static/img/logos/antgroup.png new file mode 100644 index 00000000..b7719166 Binary files /dev/null and b/static/img/logos/antgroup.png differ diff --git a/static/img/logos/youzan.png b/static/img/logos/youzan.png index 174f0143..dea4c074 100644 Binary files a/static/img/logos/youzan.png and b/static/img/logos/youzan.png differ diff --git a/static/img/oss_logo_black.svg b/static/img/oss_logo_black.svg new file mode 
100644 index 00000000..b4b176c1 --- /dev/null +++ b/static/img/oss_logo_black.svg @@ -0,0 +1,49 @@ + + + logo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/static/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png b/static/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png new file mode 100644 index 00000000..52953c9f Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-delete-cluster.png differ diff --git a/static/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png b/static/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png new file mode 100644 index 00000000..fc62a2bb Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-edit-cluster.png differ diff --git a/static/karpor/assets/cluster-mng/cluster-mng-empty.png b/static/karpor/assets/cluster-mng/cluster-mng-empty.png new file mode 100644 index 00000000..69891857 Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-empty.png differ diff --git a/static/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png b/static/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png new file mode 100644 index 00000000..8dfbceac Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-register-new-cluster.png differ diff --git a/static/karpor/assets/cluster-mng/cluster-mng-register-success.png b/static/karpor/assets/cluster-mng/cluster-mng-register-success.png new file mode 100644 index 00000000..7d06d06b Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-register-success.png differ diff --git a/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png b/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png new file mode 100644 index 00000000..3089ca65 Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-1.png differ diff --git a/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png b/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png new file mode 100644 index 00000000..076ac7f1 Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-2.png differ diff --git a/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png b/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png new file mode 100644 index 00000000..3e28eb9a Binary files /dev/null and b/static/karpor/assets/cluster-mng/cluster-mng-rotate-cluster-3.png differ diff --git a/static/karpor/assets/insight/insight-all-issues.png b/static/karpor/assets/insight/insight-all-issues.png new file mode 100644 index 00000000..ac4ca3a7 Binary files /dev/null and b/static/karpor/assets/insight/insight-all-issues.png differ diff --git a/static/karpor/assets/insight/insight-breadcrumbs.png b/static/karpor/assets/insight/insight-breadcrumbs.png new file mode 100644 index 00000000..793a21d0 Binary files /dev/null and b/static/karpor/assets/insight/insight-breadcrumbs.png differ diff --git a/static/karpor/assets/insight/insight-create-app-resource-group-rule.png b/static/karpor/assets/insight/insight-create-app-resource-group-rule.png new file mode 100644 index 00000000..e2696a12 Binary files /dev/null and b/static/karpor/assets/insight/insight-create-app-resource-group-rule.png differ diff --git a/static/karpor/assets/insight/insight-create-env-resource-group-rule.png b/static/karpor/assets/insight/insight-create-env-resource-group-rule.png new file mode 100644 
index 00000000..2db7938e Binary files /dev/null and b/static/karpor/assets/insight/insight-create-env-resource-group-rule.png differ diff --git a/static/karpor/assets/insight/insight-delete-env-resource-group.png b/static/karpor/assets/insight/insight-delete-env-resource-group.png new file mode 100644 index 00000000..1185f9a3 Binary files /dev/null and b/static/karpor/assets/insight/insight-delete-env-resource-group.png differ diff --git a/static/karpor/assets/insight/insight-edit-env-resource-group.png b/static/karpor/assets/insight/insight-edit-env-resource-group.png new file mode 100644 index 00000000..0eee3535 Binary files /dev/null and b/static/karpor/assets/insight/insight-edit-env-resource-group.png differ diff --git a/static/karpor/assets/insight/insight-home-raw.jpg b/static/karpor/assets/insight/insight-home-raw.jpg new file mode 100644 index 00000000..3aaef5f0 Binary files /dev/null and b/static/karpor/assets/insight/insight-home-raw.jpg differ diff --git a/static/karpor/assets/insight/insight-home.png b/static/karpor/assets/insight/insight-home.png new file mode 100644 index 00000000..2ff52c42 Binary files /dev/null and b/static/karpor/assets/insight/insight-home.png differ diff --git a/static/karpor/assets/insight/insight-homepage.png b/static/karpor/assets/insight/insight-homepage.png new file mode 100644 index 00000000..2ba0e921 Binary files /dev/null and b/static/karpor/assets/insight/insight-homepage.png differ diff --git a/static/karpor/assets/insight/insight-linkage.png b/static/karpor/assets/insight/insight-linkage.png new file mode 100644 index 00000000..2f1f467e Binary files /dev/null and b/static/karpor/assets/insight/insight-linkage.png differ diff --git a/static/karpor/assets/insight/insight-list-app-resource-groups.png b/static/karpor/assets/insight/insight-list-app-resource-groups.png new file mode 100644 index 00000000..32bb594f Binary files /dev/null and b/static/karpor/assets/insight/insight-list-app-resource-groups.png differ diff --git a/static/karpor/assets/insight/insight-list-env-resource-groups.png b/static/karpor/assets/insight/insight-list-env-resource-groups.png new file mode 100644 index 00000000..61f4bf43 Binary files /dev/null and b/static/karpor/assets/insight/insight-list-env-resource-groups.png differ diff --git a/static/karpor/assets/insight/insight-search-app-resource-group.png b/static/karpor/assets/insight/insight-search-app-resource-group.png new file mode 100644 index 00000000..c749fa92 Binary files /dev/null and b/static/karpor/assets/insight/insight-search-app-resource-group.png differ diff --git a/static/karpor/assets/insight/insight-single-issue.png b/static/karpor/assets/insight/insight-single-issue.png new file mode 100644 index 00000000..e4d8a6e6 Binary files /dev/null and b/static/karpor/assets/insight/insight-single-issue.png differ diff --git a/static/karpor/assets/insight/insight-summary-cluster.png b/static/karpor/assets/insight/insight-summary-cluster.png new file mode 100644 index 00000000..05b37d21 Binary files /dev/null and b/static/karpor/assets/insight/insight-summary-cluster.png differ diff --git a/static/karpor/assets/insight/insight-summary-custom-resource-group.png b/static/karpor/assets/insight/insight-summary-custom-resource-group.png new file mode 100644 index 00000000..6281be40 Binary files /dev/null and b/static/karpor/assets/insight/insight-summary-custom-resource-group.png differ diff --git a/static/karpor/assets/insight/insight-summary-kind.png b/static/karpor/assets/insight/insight-summary-kind.png new file 
mode 100644 index 00000000..84b09a98 Binary files /dev/null and b/static/karpor/assets/insight/insight-summary-kind.png differ diff --git a/static/karpor/assets/insight/insight-summary-namespace.png b/static/karpor/assets/insight/insight-summary-namespace.png new file mode 100644 index 00000000..2a7f1873 Binary files /dev/null and b/static/karpor/assets/insight/insight-summary-namespace.png differ diff --git a/static/karpor/assets/insight/insight-summary-resource.png b/static/karpor/assets/insight/insight-summary-resource.png new file mode 100644 index 00000000..0d9131b6 Binary files /dev/null and b/static/karpor/assets/insight/insight-summary-resource.png differ diff --git a/static/karpor/assets/insight/insight-topology-example.png b/static/karpor/assets/insight/insight-topology-example.png new file mode 100644 index 00000000..8cd2b6ad Binary files /dev/null and b/static/karpor/assets/insight/insight-topology-example.png differ diff --git a/static/karpor/assets/insight/insight-topology.png b/static/karpor/assets/insight/insight-topology.png new file mode 100644 index 00000000..91025f13 Binary files /dev/null and b/static/karpor/assets/insight/insight-topology.png differ diff --git a/static/karpor/assets/logo/logo-full.png b/static/karpor/assets/logo/logo-full.png new file mode 100644 index 00000000..75ba3938 Binary files /dev/null and b/static/karpor/assets/logo/logo-full.png differ diff --git a/static/karpor/assets/logo/logo-full.svg b/static/karpor/assets/logo/logo-full.svg new file mode 100644 index 00000000..e88f9529 --- /dev/null +++ b/static/karpor/assets/logo/logo-full.svg @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/static/karpor/assets/logo/logo.png b/static/karpor/assets/logo/logo.png new file mode 100644 index 00000000..3ec04662 Binary files /dev/null and b/static/karpor/assets/logo/logo.png differ diff --git a/static/karpor/assets/logo/logo.svg b/static/karpor/assets/logo/logo.svg new file mode 100644 index 00000000..3a90d00c --- /dev/null +++ b/static/karpor/assets/logo/logo.svg @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/static/karpor/assets/misc/coming-soon.jpeg b/static/karpor/assets/misc/coming-soon.jpeg new file mode 100644 index 00000000..c07bd0db Binary files /dev/null and b/static/karpor/assets/misc/coming-soon.jpeg differ diff --git a/static/karpor/assets/overview/demo.mp4 b/static/karpor/assets/overview/demo.mp4 new file mode 100644 index 00000000..d8edfd91 Binary files /dev/null and b/static/karpor/assets/overview/demo.mp4 differ diff --git a/static/karpor/assets/overview/insight.png b/static/karpor/assets/overview/insight.png new file mode 100644 index 00000000..3d99e204 Binary files /dev/null and b/static/karpor/assets/overview/insight.png differ diff --git a/static/karpor/assets/overview/search.png b/static/karpor/assets/overview/search.png new file mode 100644 index 00000000..73b05ef8 Binary files /dev/null and b/static/karpor/assets/overview/search.png differ diff --git a/static/karpor/assets/overview/vision.png b/static/karpor/assets/overview/vision.png new file mode 100644 index 00000000..14241ec5 Binary files /dev/null and b/static/karpor/assets/overview/vision.png differ diff --git a/static/karpor/assets/search/search-auto-complete-raw.jpg b/static/karpor/assets/search/search-auto-complete-raw.jpg new file mode 100644 index 00000000..154ff935 Binary files /dev/null and b/static/karpor/assets/search/search-auto-complete-raw.jpg differ diff --git 
a/static/karpor/assets/search/search-auto-complete.png b/static/karpor/assets/search/search-auto-complete.png new file mode 100644 index 00000000..2288c1e6 Binary files /dev/null and b/static/karpor/assets/search/search-auto-complete.png differ diff --git a/static/karpor/assets/search/search-by-natural-language-result.png b/static/karpor/assets/search/search-by-natural-language-result.png new file mode 100644 index 00000000..a18d1df2 Binary files /dev/null and b/static/karpor/assets/search/search-by-natural-language-result.png differ diff --git a/static/karpor/assets/search/search-by-natural-language.png b/static/karpor/assets/search/search-by-natural-language.png new file mode 100644 index 00000000..cc9c6f0d Binary files /dev/null and b/static/karpor/assets/search/search-by-natural-language.png differ diff --git a/static/karpor/assets/search/search-home-natural-language.png b/static/karpor/assets/search/search-home-natural-language.png new file mode 100644 index 00000000..9c976293 Binary files /dev/null and b/static/karpor/assets/search/search-home-natural-language.png differ diff --git a/static/karpor/assets/search/search-home.png b/static/karpor/assets/search/search-home.png new file mode 100644 index 00000000..43a70041 Binary files /dev/null and b/static/karpor/assets/search/search-home.png differ diff --git a/static/karpor/assets/search/search-result.png b/static/karpor/assets/search/search-result.png new file mode 100644 index 00000000..6a5c6af1 Binary files /dev/null and b/static/karpor/assets/search/search-result.png differ diff --git a/static/kclvm-artifact-go/index.html b/static/kclvm-artifact-go/index.html new file mode 100644 index 00000000..d83414a5 --- /dev/null +++ b/static/kclvm-artifact-go/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kclvm-artifact-go + + + + + + +
KusionStack/kclvm-artifact-go
hello https://github.com/KusionStack/kclvm-artifact-go
diff --git a/static/kclvm-artifact/index.html b/static/kclvm-artifact/index.html new file mode 100644 index 00000000..f72274cd --- /dev/null +++ b/static/kclvm-artifact/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kclvm-artifact + + + + + + +
KusionStack/kclvm-artifact
hello https://github.com/KusionStack/kclvm-artifact
diff --git a/static/kpm/index.html b/static/kpm/index.html new file mode 100644 index 00000000..25617cbb --- /dev/null +++ b/static/kpm/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kpm + + + + + + +
KusionStack/kpm
hello https://github.com/KusionStack/kpm
diff --git a/static/kpt-kcl-sdk/index.html b/static/kpt-kcl-sdk/index.html new file mode 100644 index 00000000..2c612fb1 --- /dev/null +++ b/static/kpt-kcl-sdk/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kpt-kcl-sdk + + + + + + +
KusionStack/kpt-kcl-sdk
hello https://github.com/KusionStack/kpt-kcl-sdk
diff --git a/static/kube-api/index.html b/static/kube-api/index.html new file mode 100644 index 00000000..283a9b1d --- /dev/null +++ b/static/kube-api/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kube-api + + + + + + +
KusionStack/kube-api
hello https://github.com/KusionStack/kube-api
diff --git a/static/kube-utils/index.html b/static/kube-utils/index.html new file mode 100644 index 00000000..3aa9fecb --- /dev/null +++ b/static/kube-utils/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kube-utils + + + + + + +
KusionStack/kube-utils
hello https://github.com/KusionStack/kube-utils
diff --git a/static/kuperator/index.html b/static/kuperator/index.html new file mode 100644 index 00000000..7d120ff0 --- /dev/null +++ b/static/kuperator/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kuperator + + + + + + +
KusionStack/kuperator
hello https://github.com/KusionStack/kuperator
diff --git a/static/kusion-api-go/index.html b/static/kusion-api-go/index.html new file mode 100644 index 00000000..c47a613c --- /dev/null +++ b/static/kusion-api-go/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kusion-api-go + + + + + + +
KusionStack/kusion-api-go
hello https://github.com/KusionStack/kusion-api-go
diff --git a/static/kusion-module-framework/index.html b/static/kusion-module-framework/index.html new file mode 100644 index 00000000..7e3cf5e2 --- /dev/null +++ b/static/kusion-module-framework/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kusion-module-framework + + + + + + +
KusionStack/kusion-module-framework
hello https://github.com/KusionStack/kusion-module-framework
diff --git a/static/kustomize-kcl/index.html b/static/kustomize-kcl/index.html new file mode 100644 index 00000000..2341b0d6 --- /dev/null +++ b/static/kustomize-kcl/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/kustomize-kcl + + + + + + +
KusionStack/kustomize-kcl
hello https://github.com/KusionStack/kustomize-kcl
diff --git a/static/operating/index.html b/static/operating/index.html new file mode 100644 index 00000000..06cffd95 --- /dev/null +++ b/static/operating/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/operating + + + + + + +
KusionStack/operating
hello https://github.com/KusionStack/operating
diff --git a/static/resourceconsist/index.html b/static/resourceconsist/index.html new file mode 100644 index 00000000..7ca64dfa --- /dev/null +++ b/static/resourceconsist/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/resourceconsist + + + + + + +
KusionStack/resourceconsist
hello https://github.com/KusionStack/resourceconsist
diff --git a/static/rollout/index.html b/static/rollout/index.html new file mode 100644 index 00000000..3fdf752b --- /dev/null +++ b/static/rollout/index.html @@ -0,0 +1,14 @@ + + + + KusionStack/rollout + + + + + + +
KusionStack/rollout
hello https://github.com/KusionStack/rollout
diff --git a/static/scripts/install.ps1 b/static/scripts/install.ps1 new file mode 100644 index 00000000..d08afd9b --- /dev/null +++ b/static/scripts/install.ps1 @@ -0,0 +1,194 @@ +# ------------------------------------------------------------ +# Copyright 2024 The KusionStack Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ------------------------------------------------------------ + +param ( + [string]$Version, + [string]$KusionRoot = "$Env:USERPROFILE\.kusion", + [string]$KusionReleaseBaseURL = "" +) + +# GitHub org and repo hosting Kusion CLI +$GitHubOrg = "KusionStack" +$GitHubRepo = "kusion" + +# Escape space of KusionRoot path +$KusionRoot = $KusionRoot -replace ' ', '` ' + +# Kusion related path +$KusionFileName = "kusion.exe" +$KusionPath = "${KusionRoot}\bin" +$KusionFilePath = "${KusionPath}\${KusionFileName}" + +# Temp directory for kusion installation +$KusionTmpDir = "" + +#Escape space of KusionRoot path +$KusionRoot = $KusionRoot -replace ' ', '` ' + +$ErrorActionPreference = 'stop' + +if ((Get-ExecutionPolicy) -gt 'RemoteSigned' -or (Get-ExecutionPolicy) -eq 'ByPass') { + Write-Output "PowerShell requires an execution policy of 'RemoteSigned'." + Write-Output "To make this change please run:" + Write-Output "'Set-ExecutionPolicy RemoteSigned -scope CurrentUser'" + exit +} + +try { + # Set Github request authentication for basic authentication. + if ($Env:GITHUB_USER) { + $basicAuth = [System.Convert]::ToBase64String([System.Text.Encoding]::ASCII.GetBytes($Env:GITHUB_USER + ":" + $Env:GITHUB_TOKEN)); + $githubHeader = @{"Authorization" = "Basic $basicAuth" } + } + else { + $githubHeader = @{} + } + + # Change security protocol to support TLS 1.2 / 1.1 / 1.0 - old powershell uses TLS 1.0 as a default protocol + [Net.ServicePointManager]::SecurityProtocol = "tls12, tls11, tls" + + # Check if Kusion CLI is installed. + if (Test-Path $KusionFilePath -PathType Leaf) { + Write-Output "Kusion is detected - $KusionFilePath" + Invoke-Expression "$KusionFilePath version" + Write-Output "`r`nReinstalling Kusion..." + } + else { + Write-Output "Installing Kusion..." + } + + # Create kusion related directory + Write-Output "Creating $KusionRoot directory..." + New-Item -ErrorAction Ignore -Path $KusionPath -ItemType "directory" | Out-Null + if (!(Test-Path $KusionPath -PathType Container)) { + throw "Cannot create $KusionPath, without admin right." + } + $tempDirName = [System.IO.Path]::GetRandomFileName() + $KusionTmpDir = Join-Path $KusionRoot $tempDirName + New-Item -Path $KusionTmpDir -ItemType "directory" | Out-Null + if (!(Test-Path $KusionTmpDir -PathType Container)) { + throw "Cannot create $KusionTmpDir, without admin right." 
+ } + + # Get the list of release from GitHub + $releaseURL = $KusionReleaseBaseURL + if (!$releaseURL) { + $releaseURL = "https://api.github.com/repos/${GitHubOrg}/${GitHubRepo}/releases" + } + $releases = Invoke-RestMethod -Headers $githubHeader -Uri $releaseURL -Method Get + if ($releases.Count -eq 0) { + throw "No releases from github.com/KusionStack/kusion repo." + } + + # Get latest or specified version info from releases + function GetVersionInfo { + param ( + [string]$Version, + $Releases + ) + # Filter windows binary and download archive + if (!$Version) { + $release = $Releases | Where-Object { $_.tag_name -match '^v\d+\.\d+\.\d+$' } | Select-Object -First 1 + } + else { + $release = $Releases | Where-Object { $_.tag_name -eq "v$Version" } | Select-Object -First 1 + } + + return $release + } + + # Get info about windows asset from release + function GetWindowsAsset { + param ( + $Release + ) + if ($CustomAssetFactory) { + Write-Output "CustomAssetFactory detected, try to invoke it." + return $CustomAssetFactory.Invoke($Release) + } + else { + $windowsAsset = $Release | Select-Object -ExpandProperty assets | Where-Object { $_.name -Like "*windows*.zip" } + if (!$windowsAsset) { + throw "Cannot find the windows Kusion CLI binary." + } + [hashtable]$return = @{} + $return.url = $windowsAsset.url + $return.name = $windowsAsset.name + return $return + }` + } + + # Download kusion windows asset + $release = GetVersionInfo -Version $Version -Releases $releases + if (!$release) { + throw "Cannot find the specified Kusion CLI binary version." + } + $asset = GetWindowsAsset -Release $release + $zipFileUrl = $asset.url + $assetName = $asset.name + + $zipFilePath = Join-Path $KusionTmpDir $assetName + Write-Output "Downloading $zipFileUrl..." + + $githubHeader.Accept = "application/octet-stream" + $oldProgressPreference = $progressPreference; + $progressPreference = 'SilentlyContinue'; + Invoke-WebRequest -Headers $githubHeader -Uri $zipFileUrl -OutFile $zipFilePath + $progressPreference = $oldProgressPreference; + if (!(Test-Path $zipFilePath -PathType Leaf)) { + throw "Failed to download Kusion binary." + } + + # Extract kusion binary to $KusionTmpDir + Write-Output "Extracting $zipFilePath..." + Microsoft.Powershell.Archive\Expand-Archive -Force -Path $zipFilePath -DestinationPath $KusionTmpDir + $kusionTmpFile = Join-Path $kusionTmpDir $KusionFileName + if (!(Test-Path $kusionTmpFile -PathType Leaf)) { + throw "Failed to extract $zipFilePath." + } + Move-Item -Path $zipFilePath -Force + + # Move kusion binary and related files to $KusionRoot + Write-Output "Moving kusion binary from $KusionTmpDir..." + Get-ChildItem -Path $KusionTmpDir | Move-Item -Destination $KusionRoot -Force + $kusioneExtractFile = Join-Path $KusionRoot $KusionFileName + Move-Item -Path $kusioneExtractFile -Destination $KusionFilePath -Force + if (!(Test-Path $KusionFilePath -PathType Leaf)) { + throw "Failed to move Kusion binary." + } + + # Add KusionPath directory to User Path environment variable + $userPathEnvironmentVar = [System.Environment]::GetEnvironmentVariable("PATH", "User") + $userPaths = $UserPathEnvironmentVar.Split(';') + if ($userPaths -contains $KusionPath) { + Write-Output "Skipped to add $KusionPath to user path." + } else { + [System.Environment]::SetEnvironmentVariable("PATH", $UserPathEnvironmentVar + ";$KusionPath", "User") + Write-Output "Added $KusionPath to user path." 
+ } + # SET KUSION_HOME environment variable + [System.Environment]::SetEnvironmentVariable("KUSION_HOME", "$KusionRoot", "User") + +} catch { + Write-Output "$_" + Write-Output "`r`nFailed to install kusion. Please go to https://kusionstack.io for more support." + exit +} finally { + if(Test-Path $KusionTmpDir) { + Remove-Item -Path $KusionTmpDir -Force -Recurse + } +} + +Write-Output "`r`nInstall Kusion into $KusionRoot successfully." +Invoke-Expression "$KusionFilePath version" +Write-Output "`r`nOpen a new terminal, and start using kusion! For more information on how to start, please visit https://kusionstack.io." diff --git a/static/scripts/install.sh b/static/scripts/install.sh new file mode 100755 index 00000000..7ee542b7 --- /dev/null +++ b/static/scripts/install.sh @@ -0,0 +1,392 @@ +#!/bin/sh + +# ------------------------------------------------------------ +# Copyright 2024 The KusionStack Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Reference: https://github.com/dapr/cli/tree/master/install +# ------------------------------------------------------------ + +set -o errexit +set -o nounset + +# Sudo is required to copy binary to KUSION_HOME_DIR +USE_SUDO=${USE_SUDO:-"false"} + +# Specified profile +PROFILE=${PROFILE:-""} + +# SHELL +SHELL=${SHELL:-""} + +# specifed Kusion version to install +KUSION_VERSION=${1:-""} + +# Kusion location +KUSION_HOME_DIR=${KUSION_HOME_DIR:-"$HOME/.kusion"} + +# If set "true", then will not source kusion env in detected profile +SKIP_SOURCE_KUSION_ENV=${SKIP_SOURCE_KUSION_ENV:-"false"} + +# Http request CLI +KUSION_HTTP_REQUEST_CLI="curl" + +# Kusion binary file name +KUSION_CLI="kusion" + +# Kusion env file name +KUSION_ENV_FILE=".env" + +# Kusion binary path +KUSION_CLI_FILE_PATH="${KUSION_HOME_DIR}/bin/${KUSION_CLI}" + +# Kusion github basic information +GITHUB_ORG=KusionStack +GITHUB_REPO=kusion + +# Download path of Kusion installation package +DOWNLOAD_BASE="https://github.com/${GITHUB_ORG}/${GITHUB_REPO}/releases/download" + +# Kusion release URL +RELEASE_URL="https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/releases" + +info() { + command printf '\033[1;32mInfo\033[0m: %s\n' "$1" 1>&2 +} + +warn() { + command printf '\033[1;33mWarn\033[0m: %s\n' "$1" 1>&2 +} + +error() { + command printf '\033[1;31mError\033[0m: %s\n' "$1" 1>&2 +} + +runAsRoot() { + local CMD="$*" + + if [ $USE_SUDO = "true" ]; then + CMD="sudo $CMD" + fi + + $CMD +} + +echoFexists() { + [ -f "$1" ] && echo "$1" +} + +getSystemInfo() { + ARCH=$(uname -m) + case $ARCH in + armv7*) ARCH="arm" ;; + aarch64) ARCH="arm64" ;; + x86_64) ARCH="amd64" ;; + esac + + OS=$(echo "$(uname)" | tr '[:upper:]' '[:lower:]') +} + +verifySupported() { + local current_os_arch="${OS}-${ARCH}" + + if [ "${current_os_arch}" = "darwin-amd64" ]; then + info "Your system is ${current_os_arch}." + elif [ "${current_os_arch}" = "darwin-arm64" ]; then + info "Your system is ${current_os_arch}." + elif [ "${current_os_arch}" = "linux-amd64" ]; then + info "Your system is ${current_os_arch}." 
+ else + error "No prebuilt installation package for ${current_os_arch}" + exit 1 + fi +} + +checkHttpRequestCLI() { + if type "curl" >/dev/null; then + KUSION_HTTP_REQUEST_CLI=curl + elif type "wget" >/dev/null; then + KUSION_HTTP_REQUEST_CLI=wget + else + error "Either curl or wget is required to download installation package." + exit 1 + fi +} + +toInstallVersion() { + if [ -z "$KUSION_VERSION" ]; then + info "Getting the latest Kusion version..." + getLatestReleaseVersion + else + echo "$KUSION_VERSION" + fi +} + +getLatestReleaseVersion() { + local KusionReleaseURL="${RELEASE_URL}" + local latest_release="" + + if [ "$KUSION_HTTP_REQUEST_CLI" = "curl" ]; then + latest_release=$(runAsRoot curl -s $KusionReleaseURL | grep \"tag_name\" | awk '{print $2}' | sed -n 's/\"\(.*\)\",/\1/p' | awk '/^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$/{print $0}' | head -1) + else + latest_release=$(runAsRoot wget -q --header="Accept: application/json" -O - $KusionReleaseURL | grep \"tag_name\" | awk '{print $2}' | sed -n 's/\"\(.*\)\",/\1/p' | awk '/^v[0-9]{1,}\.[0-9]{1,}\.[0-9]{1,}$/{print $0}' | head -1) + fi + + echo "$latest_release" | cut -c2- +} + +download() { + local kusion_version="$1" + local kusion_tag="v${kusion_version}" + KUSION_CLI_ARTIFACT="${KUSION_CLI}_${kusion_version}_${OS}_${ARCH}.tar.gz" + DOWNLOAD_URL="${DOWNLOAD_BASE}/${kusion_tag}/${KUSION_CLI_ARTIFACT}" + + # Create the temp directory + KUSION_TMP_ROOT=$(mktemp -dt kusion-install-XXXXXX) + ARTIFACT_TMP_FILE="$KUSION_TMP_ROOT/$KUSION_CLI_ARTIFACT" + + info "Downloading installation package from $DOWNLOAD_URL..." + if [ "$KUSION_HTTP_REQUEST_CLI" = "curl" ]; then + runAsRoot curl -SL "$DOWNLOAD_URL" -o "$ARTIFACT_TMP_FILE" + else + runAsRoot wget -O "$ARTIFACT_TMP_FILE" "$DOWNLOAD_URL" + fi + + if [ ! -f "$ARTIFACT_TMP_FILE" ]; then + error "Failed to download installation package." + return 1 + fi +} + +checkExistingKusion() { + if [ -f "$KUSION_CLI_FILE_PATH" ]; then + info "Existing kusion is detected:" + $KUSION_CLI_FILE_PATH version + info "Reinstalling kusion into $KUSION_HOME_DIR..." + else + info "Installing kusion into $KUSION_HOME_DIR..." + fi +} + +install() { + # decompress + info "Decompressing installation package $KUSION_CLI_ARTIFACT..." + tar xf "$ARTIFACT_TMP_FILE" -C "$KUSION_TMP_ROOT" + rm -f "$ARTIFACT_TMP_FILE" + kusion_tmp_cli_path="$KUSION_TMP_ROOT/$KUSION_CLI" + if [ ! -f "$kusion_tmp_cli_path" ]; then + error "Failed to decompress installation package." + return 1 + fi + + # move kusion binary under /bin + kusion_tmp_bin_dir="$KUSION_TMP_ROOT/bin" + runAsRoot mkdir "$kusion_tmp_bin_dir" + kusion_tmp_bin_cli_path="$kusion_tmp_bin_dir/$KUSION_CLI" + runAsRoot mv "$kusion_tmp_cli_path" "$kusion_tmp_bin_cli_path" + if [ ! -f "$kusion_tmp_bin_cli_path" ]; then + error "Failed to move kusion cli binary to /bin." + return 1 + fi + + # detect profile + local shell_name="" + if [ "$SHELL" ]; then + shell_name=$(basename $SHELL) + fi + detected_profile=$(detectProfile "$shell_name" "$(uname -s)") + if [ -z "${detected_profile-}" ]; then + error "No supported user profile found. Already tried \$PROFILE, ~/.bashrc, ~/.bash_profile, ~/.zshrc, ~/.profile, and ~/.config/fish/config.fish." + return 1 + fi + + # build env file + info "Building kusion env file..." 
+    install_dir=$(echo "$KUSION_HOME_DIR" | sed "s:^$HOME:\$HOME:")
+    kusion_tmp_env_path="$KUSION_TMP_ROOT/$KUSION_ENV_FILE"
+    buildEnvFile "$detected_profile" "$install_dir" "$kusion_tmp_env_path"
+
+    # clean kusion home
+    runAsRoot rm -rf "$KUSION_HOME_DIR"
+    if [ -d "$KUSION_HOME_DIR" ]; then
+        error "Failed to remove existing kusion in $KUSION_HOME_DIR."
+        return 1
+    fi
+
+    # move from tmp dir to kusion home
+    runAsRoot mv "$KUSION_TMP_ROOT" "$KUSION_HOME_DIR"
+    if [ ! -f "$KUSION_CLI_FILE_PATH" ]; then
+        error "Failed to move binary from tmp folder, $KUSION_CLI_FILE_PATH does not exist."
+        return 1
+    fi
+
+    # update profile
+    if [ $SKIP_SOURCE_KUSION_ENV = "true" ]; then
+        info "Skipping edit of user profile ($detected_profile) because SKIP_SOURCE_KUSION_ENV is true. Please add 'source $install_dir/.env' to your user profile ($detected_profile) manually."
+    else
+        env_file_path="$install_dir/$KUSION_ENV_FILE"
+        updateProfile "$detected_profile" "$env_file_path"
+    fi
+}
+
+detectProfile() {
+    local shell_name="$1"
+    local uname="$2"
+
+    if [ -f "$PROFILE" ]; then
+        info "Current profile: $PROFILE"
+        return
+    fi
+
+    # try to detect the current shell
+    case "$shell_name" in
+    bash)
+        # Shells on macOS default to opening with a login shell, while Linuxes
+        # default to a *non*-login shell, so if this is macOS we look for
+        # `.bash_profile` first; if it's Linux, we look for `.bashrc` first. The
+        # `*` fallthrough covers more than just Linux: it's everything that is not
+        # macOS (Darwin). It can be made narrower later if need be.
+        case $uname in
+        Darwin)
+            echoFexists "$HOME/.bash_profile" || echoFexists "$HOME/.bashrc"
+            ;;
+        *)
+            echoFexists "$HOME/.bashrc" || echoFexists "$HOME/.bash_profile"
+            ;;
+        esac
+        ;;
+    zsh)
+        echo "$HOME/.zshrc"
+        ;;
+    fish)
+        echo "$HOME/.config/fish/config.fish"
+        ;;
+    *)
+        # Fall back to checking for profile file existence. Once again, the order
+        # differs between macOS and everything else.
+ case $uname in + Darwin) + if [ -f "$HOME/.profile" ]; then + echo "$HOME/.profile" + elif [ -f "$HOME/.bash_profile" ]; then + echo "$HOME/.bash_profile" + elif [ -f "$HOME/.bashrc" ]; then + echo "$HOME/.bashrc" + elif [ -f "$HOME/.zshrc" ]; then + echo "$HOME/.zshrc" + elif [ -f "$HOME/.config/fish/config.fish" ]; then + echo "$HOME/.config/fish/config.fish" + fi + ;; + *) + if [ -f "$HOME/.profile" ]; then + echo "$HOME/.profile" + elif [ -f "$HOME/.bashrc" ]; then + echo "$HOME/.bashrc" + elif [ -f "$HOME/.bash_profile" ]; then + echo "$HOME/.bash_profile" + elif [ -f "$HOME/.zshrc" ]; then + echo "$HOME/.zshrc" + elif [ -f "$HOME/.config/fish/config.fish" ]; then + echo "$HOME/.config/fish/config.fish" + fi + ;; + esac + ;; + esac +} + +buildEnvFile() { + local profile="$1" + local install_dir="$2" + local env_file_path="$3" + + env_content="$(buildEnvContent "$detected_profile" "$install_dir")" + command printf "$env_content" >>"$env_file_path" + chmod a-wx "$env_file_path" +} + +buildEnvContent() { + local profile="$1" + local install_dir="$2" + + if [ $profile = "$HOME/.config/fish/config.fish" ]; then + # fish uses a little different syntax to modify the PATH + cat <>"$profile" + fi +} + +buildSourceEnvContent() { + local env_file_path="$1" + cat <&2 +} + +warn() { + command printf '\033[1;33mWarn\033[0m: %s\n' "$1" 1>&2 +} + +error() { + command printf '\033[1;31mError\033[0m: %s\n' "$1" 1>&2 +} + +runAsRoot() { + local CMD="$*" + + if [ $USE_SUDO = "true" ]; then + CMD="sudo $CMD" + fi + + $CMD +} + +echoFexists() { + [ -f "$1" ] && echo "$1" +} + +removeInstallationDir() { + info "Removing kusion installation dir $KUSION_HOME_DIR..." + runAsRoot rm -rf "$KUSION_HOME_DIR" + if [ -d "$KUSION_HOME_DIR" ]; then + error "Removing kusion installation dir failed." + return 1 + fi +} + +clearProfileSource() { + if [ $SKIP_CLEAR_SOURCE_KUSION_ENV = true ]; then + info "Skip clearing kusion env in user profile." + return 0 + fi + + # detect profile + local shell_name="" + if [ "$SHELL" ]; then + shell_name=$(basename $SHELL) + fi + detected_profile=$(detectProfile "$shell_name" "$(uname -s)") + if [ -z "${detected_profile-}" ]; then + warn "No supported user profile found. Already tried \$PROFILE ($PROFILE), ~/.bashrc, ~/.bash_profile, ~/.zshrc, ~/.profile, and ~/.config/fish/config.fish. Skip clearing kusion env in user profile." + return 0 + fi + + info "Clearing kusion env in profile $detected_profile..." + deleteProfileSourceContent "$detected_profile" +} + +detectProfile() { + local shell_name="$1" + local uname="$2" + + if [ -f "$PROFILE" ]; then + info "Current profile: $PROFILE" + return + fi + + # try to detect the current shell + case "$shell_name" in + bash) + # Shells on macOS default to opening with a login shell, while Linuxes + # default to a *non*-login shell, so if this is macOS we look for + # `.bash_profile` first; if it's Linux, we look for `.bashrc` first. The + # `*` fallthrough covers more than just Linux: it's everything that is not + # macOS (Darwin). It can be made narrower later if need be. + case $uname in + Darwin) + echoFexists "$HOME/.bash_profile" || echoFexists "$HOME/.bashrc" + ;; + *) + echoFexists "$HOME/.bashrc" || echoFexists "$HOME/.bash_profile" + ;; + esac + ;; + zsh) + echo "$HOME/.zshrc" + ;; + fish) + echo "$HOME/.config/fish/config.fish" + ;; + *) + # Fall back to checking for profile file existence. Once again, the order + # differs between macOS and everything else. 
+ case $uname in + Darwin) + if [ -f "$HOME/.profile" ]; then + echo "$HOME/.profile" + elif [ -f "$HOME/.bash_profile" ]; then + echo "$HOME/.bash_profile" + elif [ -f "$HOME/.bashrc" ]; then + echo "$HOME/.bashrc" + elif [ -f "$HOME/.zshrc" ]; then + echo "$HOME/.zshrc" + elif [ -f "$HOME/.config/fish/config.fish" ]; then + echo "$HOME/.config/fish/config.fish" + fi + ;; + *) + if [ -f "$HOME/.profile" ]; then + echo "$HOME/.profile" + elif [ -f "$HOME/.bashrc" ]; then + echo "$HOME/.bashrc" + elif [ -f "$HOME/.bash_profile" ]; then + echo "$HOME/.bash_profile" + elif [ -f "$HOME/.zshrc" ]; then + echo "$HOME/.zshrc" + elif [ -f "$HOME/.config/fish/config.fish" ]; then + echo "$HOME/.config/fish/config.fish" + fi + ;; + esac + ;; + esac +} + +deleteProfileSourceContent() { + local profile="$1" + local source_line_pattern="^$SOURCE_KUSION_CONTENT$" + local source_line_annotation_pattern="^$SOURCE_KUSION_ANNOTATION_CONTENT$" + + if grep -qc "$source_line_pattern" "$profile"; then + line_number=$(grep -n "$source_line_pattern" "$profile" | cut -d: -f1) + temp_file=$(mktemp) + sed ''"$line_number"'d' "$profile" > "$temp_file" && mv "$temp_file" "$profile" + fi + if grep -q "$source_line_pattern" "$profile"; then + warn "Failed to delete $SOURCE_KUSION_CONTENT in $profile, please delete it manually." + else + info "Delete $SOURCE_KUSION_CONTENT in $profile succeeded." + fi + + if grep -q "$source_line_annotation_pattern" "$profile"; then + annotation_line_number=$(grep -n "$source_line_annotation_pattern" "$profile" | cut -d: -f1) + annotation_temp_file=$(mktemp) + sed ''"$annotation_line_number"'d' "$profile" > "$annotation_temp_file" && mv "$annotation_temp_file" "$profile" + fi +} + +exit_trap() { + result=$? + if [ "$result" != "0" ]; then + error "Failed to uninstall kusion. Please go to https://kusionstack.io for more support." + else + info "Uninstall kusion succeeded. Hope you can use kusion again, visit https://kusionstack.io for more information." + fi + + exit $result +} + +# ----------------------------------------------------------------------------- +# main +# ----------------------------------------------------------------------------- +trap "exit_trap" EXIT + +removeInstallationDir +clearProfileSource \ No newline at end of file diff --git a/static/talks/KusionStack-origin-present-and-future.pdf b/static/talks/KusionStack-origin-present-and-future.pdf index 420b7a59..ea575f30 100644 Binary files a/static/talks/KusionStack-origin-present-and-future.pdf and b/static/talks/KusionStack-origin-present-and-future.pdf differ diff --git a/static/talks/kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era.png b/static/talks/kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era.png new file mode 100644 index 00000000..58fd5569 Binary files /dev/null and b/static/talks/kusionstack-application-scale-operation-solution-in-the-post-cloudnative-era.png differ diff --git a/static/talks/qcon-cover-cn.jpg b/static/talks/qcon-cover-cn.jpg new file mode 100644 index 00000000..779d0beb Binary files /dev/null and b/static/talks/qcon-cover-cn.jpg differ diff --git a/static/talks/qcon-cover-en.jpg b/static/talks/qcon-cover-en.jpg new file mode 100644 index 00000000..95dd7c69 Binary files /dev/null and b/static/talks/qcon-cover-en.jpg differ
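Not part of the patch above: a minimal usage sketch for the new shell installers. Both scripts are configured through the environment variables declared at their tops (`USE_SUDO`, `PROFILE`, `KUSION_HOME_DIR`, `SKIP_SOURCE_KUSION_ENV` for `install.sh`; `SKIP_CLEAR_SOURCE_KUSION_ENV` plus the same profile/home variables for `uninstall.sh`), and `install.sh` also accepts an optional positional version argument. The sketch assumes a checkout of this repository; the version `0.9.0` and the directory `$HOME/tools/kusion` are illustrative placeholders only.

```shell
# Install the latest release into the default location (~/.kusion) and let the
# script wire the generated ~/.kusion/.env file into the detected shell profile.
./static/scripts/install.sh

# Install a specific release (version passed without the leading "v") into a
# custom home directory, without touching the user profile and without sudo.
KUSION_HOME_DIR="$HOME/tools/kusion" \
SKIP_SOURCE_KUSION_ENV=true \
USE_SUDO=false \
./static/scripts/install.sh 0.9.0

# Check the binary the installer placed under $KUSION_HOME_DIR/bin.
"$HOME/tools/kusion/bin/kusion" version

# Remove the installation and clean the sourced env line from the profile;
# point KUSION_HOME_DIR at the same directory that was used during installation.
KUSION_HOME_DIR="$HOME/tools/kusion" ./static/scripts/uninstall.sh
```

When no version argument is given, `install.sh` resolves the latest `vMAJOR.MINOR.PATCH` tag from the GitHub releases API of `KusionStack/kusion` before downloading; whether a specific pinned version exists depends on the published releases.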